##// END OF EJS Templates
reachableroots: construct and sort baseset in revset module...
Yuya Nishihara -
r26094:df41c7be default
parent child Browse files
Show More
@@ -1,410 +1,406 b''
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullid,
15 15 )
16 16
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 revlog,
21 revset,
22 21 util,
23 22 )
24 23
25 24 _defaultextra = {'branch': 'default'}
26 25
27 26 def _string_escape(text):
28 27 """
29 28 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
30 29 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
31 30 >>> s
32 31 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
33 32 >>> res = _string_escape(s)
34 33 >>> s == res.decode('string_escape')
35 34 True
36 35 """
37 36 # subset of the string_escape codec
38 37 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
39 38 return text.replace('\0', '\\0')
40 39
def decodeextra(text):
    """Decode the encoded "extra" metadata of a changelog entry into a dict.

    The result always contains at least the default 'branch' key.

    >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
    ...                    ).iteritems())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({'foo': 'bar',
    ...                                 'baz': chr(92) + chr(0) + '2'})
    ...                    ).iteritems())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    # entries are NUL-separated, each one an escaped "key:value" pair
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0
                # (a newline marker protects doubled backslashes while
                # the literal \0 escape is turned back into a NUL byte)
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            # split on the first ':' only — values may contain colons
            k, v = l.decode('string_escape').split(':', 1)
            extra[k] = v
    return extra
62 61
def encodeextra(d):
    """Serialize the extra dict *d* into a NUL-separated escaped string."""
    # keys must be sorted to produce a deterministic changelog entry
    return "\0".join(_string_escape('%s:%s' % (key, d[key]))
                     for key in sorted(d))
67 66
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    stripped = [line.rstrip() for line in desc.splitlines()]
    return '\n'.join(stripped).strip('\n')
71 70
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, vfs, name, mode, buf):
        # buffered (not yet flushed) data lives in 'buf'; everything
        # before 'size' is already on disk in the real file
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size

    def end(self):
        # virtual end of file: on-disk size plus buffered bytes
        return self.size + len("".join(self.data))
    def tell(self):
        return self.offset
    def flush(self):
        pass
    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        # only reposition the real file when the virtual offset falls
        # inside it; offsets past 'size' address the in-memory buffer
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # satisfy the remainder from the buffer; first collapse the
            # buffered chunks into a single string for easy slicing
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        # writes always append to the in-memory buffer, never to disk
        self.data.append(str(s))
        self.offset += len(s)
123 122
124 123 def _divertopener(opener, target):
125 124 """build an opener that writes in 'target.a' instead of 'target'"""
126 125 def _divert(name, mode='r'):
127 126 if name != target:
128 127 return opener(name, mode)
129 128 return opener(name + ".a", mode)
130 129 return _divert
131 130
132 131 def _delayopener(opener, target, buf):
133 132 """build an opener that stores chunks in 'buf' instead of 'target'"""
134 133 def _delay(name, mode='r'):
135 134 if name != target:
136 135 return opener(name, mode)
137 136 return appender(opener, name, mode, buf)
138 137 return _delay
139 138
class changelog(revlog.revlog):
    """The revlog holding changeset metadata, with revision filtering.

    Revisions listed in ``filteredrevs`` are hidden from every accessor,
    and index writes can be delayed during a transaction so other readers
    never see a partially written changelog.
    """
    def __init__(self, opener):
        revlog.revlog.__init__(self, opener, "00changelog.i")
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.REVLOGGENERALDELTA
            self._generaldelta = False
        # delayupdate/_finalize state: while delayed, writes go either to
        # a diverted '<indexfile>.a' file or into the _delaybuf list
        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()

    def tip(self):
        """filtered version of revlog.tip"""
        # walk backwards to the highest unfiltered revision
        for i in xrange(len(self) -1, -2, -1):
            if i not in self.filteredrevs:
                return self.node(i)

    def __contains__(self, rev):
        """filtered version of revlog.__contains__"""
        return (0 <= rev < len(self)
                and rev not in self.filteredrevs)

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    @util.propertycache
    def nodemap(self):
        # XXX need filtering too
        # touching rev() forces the superclass to build _nodecache
        self.rev(self.node(0))
        return self._nodecache

    def reachableroots(self, minroot, heads, roots, includepath=False):
        # delegates to the index's native implementation; callers fall
        # back to a pure-python walk when this raises AttributeError
        return self.index.reachableroots2(minroot, heads, roots, includepath)

    def headrevs(self):
        if self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        return super(changelog, self).headrevs()

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"

        if not self._delayed:
            if len(self) == 0:
                # brand new changelog: divert writes to a '.a' file that
                # _finalize will rename into place atomically
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                # existing changelog: buffer appended data in memory
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        self._delayed = True
        tr.addpending('cl-%i' % id(self), self._writepending)
        tr.addfinalize('cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a')
            fp.write("".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self.checkinlinesize(tr)

    def readpending(self, file):
        """read index data from a "pending" file

        During a transaction, the actual changeset data is already stored in the
        main file, but not yet finalized in the on-disk index. Instead, a
        "pending" index is written by the transaction logic. If this function
        is running, we are likely in a subprocess invoked in a hook. The
        subprocess is informed that it is within a transaction and needs to
        access its content.

        This function will read all the index data out of the pending file and
        overwrite the main index."""

        if not self.opener.exists(file):
            return # no pending data for changelog
        r = revlog.revlog(self.opener, file)
        self.index = r.index
        self.nodemap = r.nodemap
        self._nodecache = r._nodecache
        self._chunkcache = r._chunkcache

    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + ".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def checkinlinesize(self, tr, fp=None):
        # never split an inline revlog mid-transaction; _finalize will
        # re-run the check once the delayed writes are flushed
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)

    def read(self, node):
        """
        format used:
        nodeid\n        : manifest node in ascii
        user\n          : user, no \n or \r allowed
        time tz extra\n : date (time is int or float, timezone is int)
                        : extra is metadata, encoded and separated by '\0'
                        : older versions ignore it
        files\n\n       : files modified by the cset, no \n or \r allowed
        (.*)            : comment (free text, ideally utf-8)

        changelog v0 doesn't use extra
        """
        text = self.revision(node)
        if not text:
            return (nullid, "", (0, 0), [], "", _defaultextra)
        last = text.index("\n\n")
        desc = encoding.tolocal(text[last + 2:])
        l = text[:last].split('\n')
        manifest = bin(l[0])
        user = encoding.tolocal(l[1])

        tdata = l[2].split(' ', 2)
        if len(tdata) != 3:
            # changelog v0: no extra field on the date line
            time = float(tdata[0])
            try:
                # various tools did silly things with the time zone field.
                timezone = int(tdata[1])
            except ValueError:
                timezone = 0
            extra = _defaultextra
        else:
            time, timezone = float(tdata[0]), int(tdata[1])
            extra = decodeextra(tdata[2])

        files = l[3:]
        return (manifest, user, (time, timezone), files, desc, extra)

    def add(self, manifest, files, desc, transaction, p1, p2,
            user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %s contains a newline")
                                    % repr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % util.parsedate(date)
        else:
            parseddate = "%d %d" % util.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                # the default branch is implicit; don't store it
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get("branch")), 'close' in extra
@@ -1,3722 +1,3723 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a descending generatorset of the ancestors of ``revs``
    (including ``revs`` themselves).  When ``followfirst`` is true only
    first parents are followed.
    """
    if followfirst:
        # slicing parentrevs(...)[:1] keeps only the first parent
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        # max-heap emulated by pushing negated revision numbers
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # merge further input revs in as we descend past them
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns an ascending generatorset of the descendants of ``revs``
    (excluding ``revs`` themselves).
    """
    if followfirst:
        # slicing parentrevs(...)[:1] keeps only the first parent
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # a revision is a descendant iff one of its parents is one
            # of the inputs or an already-found descendant
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-python fallback for the index's native ``reachableroots2``.
    Returns a bare list/set of revision numbers; the caller
    (``reachableroots``) is responsible for wrapping the result in a
    sorted baseset.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted; no need to
                # record this rev's parents for the path pass below
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot prunes the walk: nothing below it can be a root
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        # plain list, consistent with the empty-roots case above; the
        # caller converts the result to a baseset itself
        return []
    if not includepath:
        return reachable
    # second pass: pull in every revision lying on a path between a
    # reachable root and one of the heads (ascending order guarantees
    # parents are classified before their children)
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Always returns a sorted baseset.
    """
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    # Prefer the native implementation attached to the changelog index
    # and fall back to the pure-python walk when it is missing.  Probe
    # for the attribute separately so that an AttributeError raised
    # *inside* the native code propagates instead of being silently
    # swallowed into the (much slower) fallback path.
    try:
        nativefunc = repo.changelog.reachableroots
    except AttributeError:
        revs = reachablerootspure(repo, minroot, roots, heads, includepath)
    else:
        revs = nativefunc(minroot, heads, roots, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
143 144
# Pratt-parser table for the revset grammar, consumed by the generic
# parser in parser.py.  Each entry maps a token type to a 5-tuple:
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words that are operators, never symbols, when unquoted
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
181 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Yields (type, value, position) triples, always terminated by an
    ('end', None, pos) token.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: skip the 'r' prefix and disable unescaping
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # step back so the shared increment below lands on the
            # first character after the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
299 300
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    # a ParseError may carry an optional character position as its
    # second argument
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
307 308
308 309 # helpers
309 310
def getstring(x, err):
    """Return the value of a 'string' or 'symbol' node, or raise *err*."""
    if x:
        kind = x[0]
        if kind == 'string' or kind == 'symbol':
            return x[1]
    raise error.ParseError(err)
314 315
def getlist(x):
    """Flatten a left-nested 'list' parse tree into a python list of nodes."""
    if not x:
        return []
    if x[0] != 'list':
        # a single expression is a one-element argument list
        return [x]
    # ('list', rest, item): the remaining items nest in the second slot
    items = getlist(x[1])
    items.append(x[2])
    return items
321 322
def getargs(x, min, max, err):
    """Return the argument list of *x*, enforcing min/max arity.

    A negative *max* means "no upper bound".
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min:
        raise error.ParseError(err)
    if 0 <= max < nargs:
        raise error.ParseError(err)
    return args
327 328
def getargsdict(x, funcname, keys):
    """Map the arguments of *x* onto the whitespace-separated *keys*."""
    arglist = getlist(x)
    return parser.buildargsdict(arglist, funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
331 332
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    # a well-formed symbol node looks like ('symbol', name)
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
336 337
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    # ('symbol', name): the name sits in the second slot
    name = tree[1]
    return name
343 344
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    # a well-formed func node is ('func', ('symbol', name), args?)
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
348 349
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    namenode = tree[1]
    return getsymbol(namenode)
355 356
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a func node without a third element takes no arguments
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
365 366
def getset(repo, subset, x):
    """Evaluate parse tree *x* within *subset* and return a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    # smartset classes expose isascending(); anything else is a legacy
    # plain collection that must be wrapped below
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
380 381
381 382 def _getrevsource(repo, r):
382 383 extra = repo[r].extra()
383 384 for label in ('source', 'transplant_source', 'rebase_source'):
384 385 if label in extra:
385 386 try:
386 387 return repo[extra[label]].rev()
387 388 except error.RepoLookupError:
388 389 pass
389 390 return None
390 391
391 392 # operator methods
392 393
def stringset(repo, subset, x):
    """Resolve a bare string/symbol to the matching revision within subset."""
    x = repo[x].rev()
    # nullrev is a member of the full repo even though no set lists it
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
399 400
def rangeset(repo, subset, x, y):
    """Evaluate the 'x:y' range operator."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # the range runs from the first rev of x to the last rev of y,
    # in that direction (it may be descending)
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # the working directory pseudo-rev sits past the end of the log
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
424 425
def dagrange(repo, subset, x, y):
    """Evaluate the 'x::y' DAG range operator: revisions reachable from y
    that are descendants of x, including endpoints."""
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
432 433
def andset(repo, subset, x, y):
    """Evaluate 'x and y' by computing y within the result of x."""
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
435 436
def orset(repo, subset, *xs):
    """Evaluate the union of the operands *xs*.

    The operands are combined divide-and-conquer style so the addition
    tree stays balanced.
    """
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
444 445
def notset(repo, subset, x):
    """Evaluate 'not x': members of subset not matched by x."""
    excluded = getset(repo, subset, x)
    return subset - excluded
447 448
def listset(repo, subset, a, b):
    # bare 'a, b' lists are only meaningful as function arguments
    raise error.ParseError(_("can't use a list in this context"))
450 451
def keyvaluepair(repo, subset, k, v):
    # 'key=value' pairs are only meaningful as function arguments
    raise error.ParseError(_("can't use a key-value pair in this context"))
453 454
def func(repo, subset, a, b):
    """Dispatch a 'func' parse node to the predicate registered in symbols."""
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown function: suggest only documented (public) predicates
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
462 463
463 464 # functions
464 465
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # NOTE(review): the final argument presumably selects the "added"
    # field of the status tuple — confirm against checkstatus
    return checkstatus(repo, subset, pat, 1)
476 477
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # fold pairwise: gca(a, b, c) == gca(gca(a, b), c)
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
501 502
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation of ancestors() and _firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancestorset = _revancestors(repo, heads, followfirst)
    return subset & ancestorset
508 509
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper so the predicate and its first-parent variant share code
    return _ancestors(repo, subset, x)
514 515
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # Intentionally has no docstring: symbols without a docstring are
    # filtered out by the ``keep`` predicate above and so stay hidden.
    return _ancestors(repo, subset, x, followfirst=True)
519 520
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    ancs = set()
    for rev in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        cur = rev
        for _step in range(n):
            cur = cl.parentrevs(cur)[0]
        ancs.add(cur)
    return subset & ancs
536 537
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def matches(r):
        # match case-insensitively against the committer string
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(matches)
545 546
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, status))
562 563
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # legacy alias for bisect(); deliberately left without a docstring so
    # the ``keep`` predicate above keeps it out of suggestions/help
    return bisect(repo, subset, x)
567 568
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: direct lookup; a missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. 're:'): collect every bookmark whose name matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # never report the null revision
    bms -= set([node.nullrev])
    return subset & bms
605 606
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # branchinfo(rev) gives (branchname, closed), served from revbranchcache
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: select every changeset whose branch matches the
    # branch of any member of that set, plus the members themselves
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
638 639
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
649 650
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # bundlerevs only exists on a bundle repository's changelog
    try:
        revs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & revs
661 662
def checkstatus(repo, subset, pat, field):
    """Helper for ``adds()``/``modifies()``/``removes()``.

    Keeps the revisions of ``subset`` whose status list number ``field``
    (an index into the ``repo.status()`` tuple) contains a file matching
    ``pat``.
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-element cache for the matcher; 'set:' filesets depend on the
    # changectx and must be rebuilt for every revision
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # literal single-file pattern: cheap membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            # fast pre-check against the list of touched files
            if fname not in c.files():
                return False
        else:
            # pattern: require at least one touched file to match before
            # paying for the status computation below
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        # expensive part: status against the first parent
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # falls through returning None (falsy) when nothing matched

    return subset.filter(matches)
693 694
def _children(repo, narrow, parentset):
    # Return the members of 'narrow' that have at least one parent in
    # 'parentset'.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    # a child always has a larger revision number than its parents, so
    # anything at or below the smallest parent can be skipped outright
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
709 710
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parentset = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parentset)
717 718
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
725 726
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(r):
        ctx = repo[r]
        if not matchmod.patkind(pat):
            # plain path: a direct manifest membership test is enough
            return pathutil.canonpath(repo.root, repo.getcwd(), pat) in ctx
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        # real pattern: scan the whole manifest
        for f in ctx.manifest():
            if m(f):
                return True
        return False

    return subset.filter(matches)
752 753
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def matches(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(matches)
774 775
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    inrange = util.matchdate(spec)

    def matches(r):
        # date()[0] is the unix timestamp
        return inrange(repo[r].date()[0])

    return subset.filter(matches)
783 784
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))
    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
796 797
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation for descendants()/_firstdescendants(): roots
    # themselves are included in the result
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again so the result follows the
        # subset's own iteration order
        result = subset & result
    return result
815 816
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # follows both parents; see _firstdescendants for the first-parent variant
    return _descendants(repo, subset, x, followfirst=False)
821 822
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # Intentionally undocumented (no docstring -> hidden from suggestions).
    return _descendants(repo, subset, x, followfirst=True)
826 827
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # chain of revisions visited while walking back from r
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
870 871
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
879 880
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
888 889
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def matches(r):
        # with no value argument, presence of the label is enough
        extradict = repo[r].extra()
        if label not in extradict:
            return False
        return value is None or matcher(extradict[label])

    return subset.filter(matches)
918 919
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: exactly one candidate file
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: candidates come from the working directory contents
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # build the head-manifest cache lazily, only once per file
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1033 1034
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # thin alias: all argument parsing and validation happens in limit()
    return limit(repo, subset, x)
1039 1040
def _follow(repo, subset, x, name, followfirst=False):
    # optional single argument: a filename whose history should be tracked
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    wdp = repo['.']

    if not l:
        # no file given: ancestors of the working directory's first parent
        revs = _revancestors(repo, baseset([wdp.rev()]), followfirst)
        return subset & revs

    fname = getstring(l[0], _("%s expected a filename") % name)
    if fname not in wdp:
        return baseset()
    fctx = wdp[fname]
    revs = set(c.rev() for c in fctx.ancestors(followfirst=followfirst))
    # include the revision responsible for the most recent version
    revs.add(fctx.introrev())
    return subset & revs
1056 1057
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, name='follow')
1064 1065
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # Intentionally undocumented (no docstring -> hidden from suggestions).
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1070 1071
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset drops "null" from subset if it is present
    everything = spanset(repo)
    return subset & everything
1078 1079
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        ctx = repo[r]
        # search file names, user and description, in that order
        haystacks = ctx.files() + [ctx.user(), ctx.description()]
        return any(gr.search(text) for text in haystacks)

    return subset.filter(matches)
1099 1100
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
    # (Intentionally has no docstring so it stays hidden from users.)

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # split the two-character prefix from the payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches if any of its touched files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1156 1157
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to the generic _matchfiles machinery with a 'p:' pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1169 1170
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # Combine with subset first (as the previous XXX note here demanded):
    # 'hs' is an unordered set, so 'baseset(hs) & subset' would impose the
    # set's arbitrary order on the result; 'subset & baseset(hs)' preserves
    # the order of 'subset'.
    return subset & baseset(hs)
1185 1186
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # drop every member that is a parent of another member
    members = getset(repo, subset, x)
    return members - parents(repo, subset, x)
1193 1194
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are exactly the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1202 1203
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if needle in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1217 1218
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    args = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(args[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    candidates = getset(repo, fullreposet(repo), args[0])
    it = iter(candidates)
    picked = []
    # examine only the first 'lim' members of the argument set; of those,
    # keep the ones that are also in the subset
    for _n in xrange(lim):
        rev = next(it, None)
        if rev is None:
            break
        if rev in subset:
            picked.append(rev)
    return baseset(picked)
1243 1244
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(args[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    candidates = getset(repo, fullreposet(repo), args[0])
    # walk the argument set from its end
    candidates.reverse()
    it = iter(candidates)
    picked = []
    # examine only the last 'lim' members; keep those also in the subset
    for _n in xrange(lim):
        rev = next(it, None)
        if rev is None:
            break
        if rev in subset:
            picked.append(rev)
    return baseset(picked)
1270 1271
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        top = candidates.max()
        if top in subset:
            return baseset([top])
    return baseset()
1281 1282
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1290 1291
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset()
    cl = repo.changelog
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[i] counts the children of revision (baserev + i)
    childcount = [0] * (len(repo) - baserev)
    for rev in cl.revs(start=baserev + 1):
        for parent in cl.parentrevs(rev):
            if parent >= baserev:
                childcount[parent - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1309 1310
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        bottom = candidates.min()
        if bottom in subset:
            return baseset([bottom])
    return baseset()
1320 1321
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # field 0 of the repo.status() tuple is the list of modified files
    return checkstatus(repo, subset, pattern, 0)
1332 1333
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect all matching namespaces; none matching is an error
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather every revision bound to a non-deprecated name in the selected
    # namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    # the null revision is never a useful answer
    names -= set([node.nullrev])
    return subset & names
1370 1371
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-hex-digit node: exact lookup only
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: _partialmatch handles ambiguity, returning None
        # when there is no unambiguous match
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1394 1395
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict the subset to the revisions the obsstore marks obsolete
    return subset & obsmod.getrevs(repo, 'obsolete')
1402 1403
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither in
        # the include set nor one of its descendants
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ancestors of include that are not ancestors of exclude
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1428 1429
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its start;
        # None when rev has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    sources = set(_firstsrc(r) for r in dests)
    sources.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & sources
1459 1460
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer/pop suppresses the discovery phase's ui output
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1486 1487
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    firstparents = set(parentrevs(r)[0]
                       for r in getset(repo, fullreposet(repo), x))
    firstparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & firstparents
1505 1506
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: expose its second parent, if it has one
        wparents = repo[x].parents()
        if len(wparents) < 2:
            return baseset()
        p = wparents[1].rev()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    parentrevs = repo.changelog.parentrevs
    secondparents = set(parentrevs(r)[1]
                        for r in getset(repo, fullreposet(repo), x))
    secondparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & secondparents
1528 1529
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory's parents are not in the changelog,
                # so they must be read from the context object
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1547 1548
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the per-phase rev sets are precomputed
        revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        revs = baseset(revs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # slow path: query the phase of every candidate revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1560 1561
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1568 1569
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1576 1577
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parse-tree token; its payload is the digit
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1601 1602
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and degrade to an empty result
        return baseset()
1615 1616
# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # union every non-public phase set (index 0 is the public one)
        revs = set()
        for phaseset in repo._phasecache._phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # slow path: test each candidate's phase individually
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) != phases.public,
                         cache=False)
1632 1633
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == phases.public,
                         cache=False)
1642 1643
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means: look up the current local branch name on the remote
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier remotely, then map the node back locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1677 1678
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 selects removed files
    return checkstatus(repo, subset, pattern, 2)
1689 1690
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though the changelog does not contain it
    if r != node.nullrev and r not in repo.changelog:
        return baseset()
    return subset & baseset([r])
1705 1706
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches when all selected fields agree with at least one rev
        # of the reference set
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1817 1818
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1825 1826
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    # renamed from 'filter': don't shadow the builtin of the same name
    def isroot(r):
        # r is a root when no real (non-null) parent of r is also in s
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(isroot)
1838 1839
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map each char to its complement so that an ascending sort of the
        # inverted string gives a descending sort of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths for pure revision-number ordering
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a sort-key tuple per revision, then sort those
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # NOTE: 'k in "user author"' is a substring test, so it also
            # accepts fragments like 'r' — presumably intentional shorthand
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # append the rev itself as a tiebreaker and to recover it after sorting
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1901 1902
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state is tracked via the .hgsubstate file
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names matching the user-supplied pattern
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # did this changeset touch .hgsubstate relative to its first parent?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any subrepo change at all qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1944 1945
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # fall-through: both 'literal:'-prefixed and unprefixed patterns are
    # matched by exact string equality
    return 'literal', pattern, pattern.__eq__
1983 1984
def _substringmatcher(pattern):
    """Like _stringmatcher(), but a literal pattern matches as a substring."""
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher
1989 1990
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision, excluding the floating 'tip' tag
        tagrevs = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
        return subset & tagrevs
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        tagrevs = set([repo[tn].rev()])
    else:
        tagrevs = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    return subset & tagrevs
2018 2019
def tagged(repo, subset, x):
    # alias of tag(), kept for backward compatibility
    return tag(repo, subset, x)
2021 2022
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # obsstore-backed set of revisions flagged 'unstable'
    return subset & obsmod.getrevs(repo, 'unstable')
2030 2031
2031 2032
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # plain alias of author()
    return author(repo, subset, x)
2041 2042
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # fullreposet is special-cased: it stands for 'everything', so wdirrev
    # is considered a member even though it is not enumerated
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2049 2050
# for internal use
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            # not a plain rev number: resolve the symbol through the repo
            r = repo[t].rev()
        if r in seen:
            continue
        # nullrev is only implicitly part of a fullreposet
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)
2075 2076
# for internal use
def _intlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # keep only the requested revs that survive in the incoming subset
    wanted = [int(t) for t in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2084 2085
# for internal use
def _hexlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    # map each full hex node to its rev, then filter by the incoming subset
    wanted = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2094 2095
# dispatch table mapping revset predicate names to their implementations;
# parsed 'func' nodes are resolved through this table
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2169 2170
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])
2243 2244
# dispatch table mapping parse-tree node types to their evaluation functions
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2259 2260
def optimize(x, small):
    """Transform parse tree ``x`` and return a ``(weight, tree)`` pair.

    The weight is a rough cost estimate of evaluating the tree (bigger is
    slower); the 'and' branch uses it to evaluate the cheaper operand
    first.  ``small`` hints that the result is expected to be a small set,
    which lowers the weight of single-revision nodes via ``smallbonus``.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'x - y' is evaluated as 'x and not y'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # note: substring membership test; 'negate' was already rewritten
        # by the branch above
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # hand-tuned cost estimates for known predicates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2392 2393
2393 2394 _aliasarg = ('func', ('symbol', '_aliasarg'))
2394 2395 def _getaliasarg(tree):
2395 2396 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2396 2397 return X, None otherwise.
2397 2398 """
2398 2399 if (len(tree) == 3 and tree[:2] == _aliasarg
2399 2400 and tree[2][0] == 'string'):
2400 2401 return tree[2][1]
2401 2402 return None
2402 2403
def _checkaliasarg(tree, known=None):
    """Reject ``_aliasarg`` placeholders not produced by alias expansion.

    Walk ``tree`` recursively and raise UnknownIdentifier for any
    ``_aliasarg`` construct whose value is not listed in ``known``.
    Used to avoid alias placeholder injection by users.
    """
    if not isinstance(tree, tuple):
        return
    name = _getaliasarg(tree)
    if name is not None and (not known or name not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2413 2414
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions; unlike regular revset symbols,
# '$' is allowed here (alias argument names like '$1')
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2418 2419
def _tokenizealias(program, lookup=None):
    """Tokenize an alias declaration or definition.

    Behaves like ``tokenize`` except that ``$`` is also accepted as an
    initial symbol letter (for backward compatibility); callers must
    themselves check whether ``$`` shows up in unexpected symbols.
    """
    tokens = tokenize(program, lookup=lookup,
                      syminitletters=_aliassyminitletters)
    return tokens
2428 2429
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole string must parse as a single declaration
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            # arguments must all be plain symbols, and unique
            args = []
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2499 2500
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wraps _tokenizealias, rewriting symbols that name alias
        # arguments into _aliasarg('NAME') token sequences
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # the whole string must parse as a single definition
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2564 2565
class revsetalias(object):
    """A single user-defined revset alias: parsed declaration plus
    replacement tree.  A failed parse is recorded in ``self.error``
    rather than raised, so bad aliases only warn when actually used.
    """

    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            # note: self.replacement is left unset on declaration error
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2589 2590
2590 2591 def _getalias(aliases, tree):
2591 2592 """If tree looks like an unexpanded alias, return it. Return None
2592 2593 otherwise.
2593 2594 """
2594 2595 if isinstance(tree, tuple) and tree:
2595 2596 if tree[0] == 'symbol' and len(tree) == 2:
2596 2597 name = tree[1]
2597 2598 alias = aliases.get(name)
2598 2599 if alias and alias.args is None and alias.tree == tree:
2599 2600 return alias
2600 2601 if tree[0] == 'func' and len(tree) > 1:
2601 2602 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2602 2603 name = tree[1][1]
2603 2604 alias = aliases.get(name)
2604 2605 if alias and alias.args is not None and alias.tree == tree[:2]:
2605 2606 return alias
2606 2607 return None
2607 2608
def _expandargs(tree, args):
    """Substitute alias arguments into ``tree``, recursively.

    Every ``_aliasarg(NAME)`` placeholder is replaced by ``args[NAME]``;
    other tuple nodes are rebuilt with their children expanded, and
    non-tuple leaves are returned unchanged.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2618 2619
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used
    to detect infinite recursion; 'cache' memoizes fully-expanded
    replacement trees by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # argument values are expanded with a fresh stack: they are
            # independent of the alias currently being expanded
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2652 2653
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases in ``tree``.

    Aliases come from the [revsetalias] config section.  When
    ``showwarning`` is given, errors in alias definitions that have not
    been reported yet are emitted through it (once per alias).
    """
    _checkaliasarg(tree)
    byname = {}
    for key, value in ui.configitems('revsetalias'):
        a = revsetalias(key, value)
        byname[a.name] = a
    expanded = _expandaliases(byname, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, a in sorted(byname.iteritems()):
            if a.error and not a.warned:
                showwarning(_('warning: %s\n') % (a.error))
                a.warned = True
    return expanded
2667 2668
def foldconcat(tree):
    """Fold ``##`` concatenation nodes into a single string node.

    A subtree rooted at '_concat' is flattened; its string/symbol leaves
    are joined into one ('string', ...) node.  Any other element under
    '##' raises ParseError.  Other nodes are rebuilt recursively.
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(child) for child in tree)
    pieces = []
    stack = [tree]
    while stack:
        node = stack.pop()
        if node[0] == '_concat':
            # push children so they are visited left-to-right
            stack.extend(reversed(node[1:]))
        elif node[0] in ('string', 'symbol'):
            pieces.append(node[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (node[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2688 2689
def parse(spec, lookup=None):
    """Parse revset string ``spec`` into a simplified parse tree.

    Raise ParseError unless the whole string is consumed.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2695 2696
def posttreebuilthook(tree, repo):
    """Hook point invoked after the parse tree has been built and
    optimized; does nothing by default."""
    # hook for extensions to execute code on the optimized tree
    pass
2699 2700
def match(ui, spec, repo=None):
    """Create a matcher function for the single revset ``spec``.

    Raise ParseError when ``spec`` is empty.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2708 2709
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        parsed = parse(specs[0], lookup)
    else:
        parsed = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, parsed, repo)
2726 2727
def _makematcher(ui, tree, repo):
    """Build a matcher function from an already-parsed revset tree.

    Expands aliases (when ``ui`` is given), folds ``##`` concatenation,
    optimizes the tree, and returns ``mfunc(repo, subset=None)`` which
    evaluates the tree against ``subset`` and returns a smartset.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            # 'subset' already implements the smartset API
            result = getset(repo, subset, tree)
        else:
            # wrap plain collections into a smartset first
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2742 2743
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a quoted, escaped literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument: types with a dedicated _list variant
        # are NUL-joined directly; other types (e.g. 'r') are split
        # recursively into balanced 'or' pairs
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            # NOTE(review): a trailing lone '%' would raise IndexError
            # here rather than Abort -- confirm callers never do that
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2836 2837
def prettyformat(tree):
    """Render a revset parse tree as an indented multi-line string."""
    leaftypes = ('string', 'symbol')
    return parser.prettyformat(tree, leaftypes)
2839 2840
def depth(tree):
    """Return the nesting depth of a parse tree; leaves have depth 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2845 2846
def funcsused(tree):
    """Return the set of function names referenced by a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for child in tree[1:]:
        used.update(funcsused(child))
    if tree[0] == 'func':
        used.add(tree[1][1])
    return used
2856 2857
class abstractsmartset(object):
    """Base class of the smartset API: ordered sets of revision numbers
    supporting lazy iteration.  Methods raising NotImplementedError must
    be provided by subclasses.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        # NOTE(review): default here is reverse=True while baseset.sort
        # defaults to reverse=False; harmless since this always raises,
        # but the signatures should probably agree -- confirm
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repository is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin functions have no func_code attribute and cannot (and
        # do not need to) be wrapped by cachefunc
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2961 2962
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # keep a pre-built set when one is handed in, so the _set
        # property does not need to rebuild it later
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
            data = list(data)
        self._list = data
        # None means "iterate in insertion order"; True/False mean
        # ascending/descending sorted order
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy, shared by both sorted iteration directions
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the underlying set's __contains__ directly for speed
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # unsorted: reversing means reversing insertion order in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # '' unsorted, '-' descending, '+' ascending
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3058 3059
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes per-revision membership results
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # generator yielding only the revisions accepted by the predicate
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # cheap emptiness test: stop at the first surviving revision
        for r in self:
            return True
        return False

    def __len__(self):
        # count lazily instead of materializing the whole filtered list
        # (and an extra baseset) just to measure it: still O(n) time, but
        # O(1) memory
        return sum(1 for r in self)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reverse iteration available: scan to the end
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3148 3149
3149 3150 def _iterordered(ascending, iter1, iter2):
3150 3151 """produce an ordered iteration from two iterators with the same order
3151 3152
3152 3153 The ascending is used to indicated the iteration direction.
3153 3154 """
3154 3155 choice = max
3155 3156 if ascending:
3156 3157 choice = min
3157 3158
3158 3159 val1 = None
3159 3160 val2 = None
3160 3161 try:
3161 3162 # Consume both iterators in an ordered way until one is empty
3162 3163 while True:
3163 3164 if val1 is None:
3164 3165 val1 = iter1.next()
3165 3166 if val2 is None:
3166 3167 val2 = iter2.next()
3167 3168 next = choice(val1, val2)
3168 3169 yield next
3169 3170 if val1 == next:
3170 3171 val1 = None
3171 3172 if val2 == next:
3172 3173 val2 = None
3173 3174 except StopIteration:
3174 3175 # Flush any remaining values and consume the other one
3175 3176 it = iter2
3176 3177 if val1 is not None:
3177 3178 yield val1
3178 3179 it = iter1
3179 3180 elif val2 is not None:
3180 3181 # might have been equality and both are empty
3181 3182 yield val2
3182 3183 for val in it:
3183 3184 yield val
3184 3185
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter appears unused in this class — verify before
        # removing.
        self._iter = None
        # tri-state: None = unordered, True = ascending, False = descending
        self._ascending = ascending
        self._genlist = None   # cached baseset of all generated values
        self._asclist = None   # cached ascending-sorted list of all values

    def __len__(self):
        # forces full materialization via the _list property
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialize (and deduplicate, via __iter__) both sides once
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so
        we don't yield any duplicates.

        If the ascending attribute is set, iterate over both collections at
        the same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the fully-materialized sorted list when it exists
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # both sides must support fast ascending iteration
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # prefer the fully-materialized sorted list when it exists
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # both sides must support fast descending iteration
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if
        we know they are ascending or descending we can sort them in a smart
        way.
        """
        # Only the direction flag is set here; the actual ordered iteration
        # happens lazily in __iter__/fastasc/fastdesc.
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reverse, take the (new) first element, then restore direction
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3384 3385
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None if gen's order is unknown, otherwise whether gen
        yields values in ascending (True) or descending (False) order.
        """
        self._gen = gen
        self._asclist = None     # sorted copy, built once gen is exhausted
        self._cache = {}         # value -> membership result
        self._genlist = []       # values produced so far, in gen order
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # install the order-optimised implementations on the instance
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator; _consumegen installs fastasc
        # and fastdesc once the generator is exhausted
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: freeze a sorted copy and switch the
            # fast iterators to it
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen installs
            # the fast iterators, so the recursion terminates.
            for x in self._consumegen():
                pass
            # bug fix: the previous code returned self.first() here, which
            # yielded the wrong end of the set after forced consumption
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3558 3559
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        # normalize so that _start <= _end always holds; the direction is
        # carried only by _ascending
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # skip revisions hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract the hidden revisions that fall inside the range
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # __init__ guarantees _start <= _end, so a single range
                # check suffices; the former extra '(end < rev <= start)'
                # test could never be true and has been removed
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3667 3668
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        # a spanset covering every revision: 0 .. len(repo)
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make is so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3706 3707
def prettyformatset(revs):
    """Render repr(revs) as an indented, one-smartset-per-line string.

    Each '<' opens a new line; the indentation equals the nesting depth,
    computed from the balance of '<' and '>' seen so far.
    """
    rs = repr(revs)
    total = len(rs)
    segments = []
    pos = 0
    while pos < total:
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        segments.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(segments)
3720 3721
# tell hggettext to extract docstrings from these functions:
# (presumably `symbols` maps revset predicate names to their
# implementations — defined earlier in this module; verify there)
i18nfunctions = symbols.values()
@@ -1,185 +1,185 b''
1 1 revlog.parseindex must be able to parse the index file even if
2 2 an index entry is split between two 64k blocks. The ideal test
3 3 would be to create an index file with inline data where
4 4 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
5 5 the size of an index entry) and with an index entry starting right
6 6 before the 64k block boundary, and try to read it.
7 7 We approximate that by reducing the read buffer to 1 byte.
8 8
9 9 $ hg init a
10 10 $ cd a
11 11 $ echo abc > foo
12 12 $ hg add foo
13 13 $ hg commit -m 'add foo'
14 14 $ echo >> foo
15 15 $ hg commit -m 'change foo'
16 16 $ hg log -r 0:
17 17 changeset: 0:7c31755bf9b5
18 18 user: test
19 19 date: Thu Jan 01 00:00:00 1970 +0000
20 20 summary: add foo
21 21
22 22 changeset: 1:26333235a41c
23 23 tag: tip
24 24 user: test
25 25 date: Thu Jan 01 00:00:00 1970 +0000
26 26 summary: change foo
27 27
28 28 $ cat >> test.py << EOF
29 29 > from mercurial import changelog, scmutil
30 30 > from mercurial.node import *
31 31 >
32 32 > class singlebyteread(object):
33 33 > def __init__(self, real):
34 34 > self.real = real
35 35 >
36 36 > def read(self, size=-1):
37 37 > if size == 65536:
38 38 > size = 1
39 39 > return self.real.read(size)
40 40 >
41 41 > def __getattr__(self, key):
42 42 > return getattr(self.real, key)
43 43 >
44 44 > def opener(*args):
45 45 > o = scmutil.opener(*args)
46 46 > def wrapper(*a):
47 47 > f = o(*a)
48 48 > return singlebyteread(f)
49 49 > return wrapper
50 50 >
51 51 > cl = changelog.changelog(opener('.hg/store'))
52 52 > print len(cl), 'revisions:'
53 53 > for r in cl:
54 54 > print short(cl.node(r))
55 55 > EOF
56 56 $ python test.py
57 57 2 revisions:
58 58 7c31755bf9b5
59 59 26333235a41c
60 60
61 61 $ cd ..
62 62
63 63 #if no-pure
64 64
65 65 Test SEGV caused by bad revision passed to reachableroots() (issue4775):
66 66
67 67 $ cd a
68 68
69 69 $ python <<EOF
70 70 > from mercurial import changelog, scmutil
71 71 > cl = changelog.changelog(scmutil.vfs('.hg/store'))
72 72 > print 'good heads:'
73 73 > for head in [0, len(cl) - 1, -1]:
74 74 > print '%s: %r' % (head, cl.reachableroots(0, [head], [0]))
75 75 > print 'bad heads:'
76 76 > for head in [len(cl), 10000, -2, -10000, None]:
77 77 > print '%s:' % head,
78 78 > try:
79 79 > cl.reachableroots(0, [head], [0])
80 80 > print 'uncaught buffer overflow?'
81 81 > except (IndexError, TypeError) as inst:
82 82 > print inst
83 83 > print 'good roots:'
84 84 > for root in [0, len(cl) - 1, -1]:
85 85 > print '%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root]))
86 86 > print 'out-of-range roots are ignored:'
87 87 > for root in [len(cl), 10000, -2, -10000]:
88 88 > print '%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root]))
89 89 > print 'bad roots:'
90 90 > for root in [None]:
91 91 > print '%s:' % root,
92 92 > try:
93 93 > cl.reachableroots(root, [len(cl) - 1], [root])
94 94 > print 'uncaught error?'
95 95 > except TypeError as inst:
96 96 > print inst
97 97 > EOF
98 98 good heads:
99 0: <baseset+ [0]>
100 1: <baseset+ [0]>
101 -1: <baseset+ []>
99 0: [0]
100 1: [0]
101 -1: []
102 102 bad heads:
103 103 2: head out of range
104 104 10000: head out of range
105 105 -2: head out of range
106 106 -10000: head out of range
107 107 None: an integer is required
108 108 good roots:
109 0: <baseset+ [0]>
110 1: <baseset+ [1]>
111 -1: <baseset+ [-1]>
109 0: [0]
110 1: [1]
111 -1: [-1]
112 112 out-of-range roots are ignored:
113 2: <baseset+ []>
114 10000: <baseset+ []>
115 -2: <baseset+ []>
116 -10000: <baseset+ []>
113 2: []
114 10000: []
115 -2: []
116 -10000: []
117 117 bad roots:
118 118 None: an integer is required
119 119
120 120 $ cd ..
121 121
122 122 Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
123 123
124 124 $ mkdir invalidparent
125 125 $ cd invalidparent
126 126
127 127 $ hg clone --pull -q --config phases.publish=False ../a limit
128 128 $ hg clone --pull -q --config phases.publish=False ../a segv
129 129 $ rm -R limit/.hg/cache segv/.hg/cache
130 130
131 131 $ python <<EOF
132 132 > data = open("limit/.hg/store/00changelog.i", "rb").read()
133 133 > for n, p in [('limit', '\0\0\0\x02'), ('segv', '\0\x01\0\0')]:
134 134 > # corrupt p1 at rev0 and p2 at rev1
135 135 > d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
136 136 > open(n + "/.hg/store/00changelog.i", "wb").write(d)
137 137 > EOF
138 138
139 139 $ hg debugindex -f1 limit/.hg/store/00changelog.i
140 140 rev flag offset length size base link p1 p2 nodeid
141 141 0 0000 0 63 62 0 0 2 -1 7c31755bf9b5
142 142 1 0000 63 66 65 1 1 0 2 26333235a41c
143 143 $ hg debugindex -f1 segv/.hg/store/00changelog.i
144 144 rev flag offset length size base link p1 p2 nodeid
145 145 0 0000 0 63 62 0 0 65536 -1 7c31755bf9b5
146 146 1 0000 63 66 65 1 1 0 65536 26333235a41c
147 147
148 148 $ cat <<EOF > test.py
149 149 > import sys
150 150 > from mercurial import changelog, scmutil
151 151 > cl = changelog.changelog(scmutil.vfs(sys.argv[1]))
152 152 > n0, n1 = cl.node(0), cl.node(1)
153 153 > ops = [
154 154 > ('reachableroots',
155 155 > lambda: cl.index.reachableroots2(0, [1], [0], False)),
156 156 > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
157 157 > ('index_headrevs', lambda: cl.headrevs()),
158 158 > ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
159 159 > ('find_deepest', lambda: cl.ancestor(n0, n1)),
160 160 > ]
161 161 > for l, f in ops:
162 162 > print l + ':',
163 163 > try:
164 164 > f()
165 165 > print 'uncaught buffer overflow?'
166 166 > except ValueError, inst:
167 167 > print inst
168 168 > EOF
169 169
170 170 $ python test.py limit/.hg/store
171 171 reachableroots: parent out of range
172 172 compute_phases_map_sets: parent out of range
173 173 index_headrevs: parent out of range
174 174 find_gca_candidates: parent out of range
175 175 find_deepest: parent out of range
176 176 $ python test.py segv/.hg/store
177 177 reachableroots: parent out of range
178 178 compute_phases_map_sets: parent out of range
179 179 index_headrevs: parent out of range
180 180 find_gca_candidates: parent out of range
181 181 find_deepest: parent out of range
182 182
183 183 $ cd ..
184 184
185 185 #endif
General Comments 0
You need to be logged in to leave comments. Login now