reachableroots: construct and sort baseset in revset module...
Yuya Nishihara
r26094:df41c7be default
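This changeset moves baseset construction and sorting out of changelog.py and into the revset module: changelog.reachableroots() (C index path) and reachablerootspure() (pure-Python fallback) now return raw revisions, and revset.reachableroots() normalizes the result in one place. A condensed sketch of the resulting wrapper, taken from the revset.py hunk below (docstring and the reachablerootspure body elided):

    # revset.py -- after this change the wrapper owns baseset creation and sorting
    def reachableroots(repo, roots, heads, includepath=False):
        if not roots:
            return baseset()
        minroot = roots.min()
        roots = list(roots)
        heads = list(heads)
        try:
            # C extension fast path: changelog.reachableroots() now returns the
            # raw result of index.reachableroots2() instead of a sorted baseset
            revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
        except AttributeError:
            # pure-Python fallback likewise returns a plain collection of revs
            revs = reachablerootspure(repo, minroot, roots, heads, includepath)
        revs = baseset(revs)
        revs.sort()
        return revs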
@@ -1,410 +1,406 @@ mercurial/changelog.py
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullid,
14 nullid,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 revlog,
20 revlog,
-    revset,
     util,
 )
24
23
25 _defaultextra = {'branch': 'default'}
24 _defaultextra = {'branch': 'default'}
26
25
27 def _string_escape(text):
26 def _string_escape(text):
28 """
27 """
29 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
28 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
30 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
29 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
31 >>> s
30 >>> s
32 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
31 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
33 >>> res = _string_escape(s)
32 >>> res = _string_escape(s)
34 >>> s == res.decode('string_escape')
33 >>> s == res.decode('string_escape')
35 True
34 True
36 """
35 """
37 # subset of the string_escape codec
36 # subset of the string_escape codec
38 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
37 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
39 return text.replace('\0', '\\0')
38 return text.replace('\0', '\\0')
40
39
41 def decodeextra(text):
40 def decodeextra(text):
42 """
41 """
43 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
42 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
44 ... ).iteritems())
43 ... ).iteritems())
45 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
44 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
46 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
45 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
47 ... 'baz': chr(92) + chr(0) + '2'})
46 ... 'baz': chr(92) + chr(0) + '2'})
48 ... ).iteritems())
47 ... ).iteritems())
49 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
48 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
50 """
49 """
51 extra = _defaultextra.copy()
50 extra = _defaultextra.copy()
52 for l in text.split('\0'):
51 for l in text.split('\0'):
53 if l:
52 if l:
54 if '\\0' in l:
53 if '\\0' in l:
55 # fix up \0 without getting into trouble with \\0
54 # fix up \0 without getting into trouble with \\0
56 l = l.replace('\\\\', '\\\\\n')
55 l = l.replace('\\\\', '\\\\\n')
57 l = l.replace('\\0', '\0')
56 l = l.replace('\\0', '\0')
58 l = l.replace('\n', '')
57 l = l.replace('\n', '')
59 k, v = l.decode('string_escape').split(':', 1)
58 k, v = l.decode('string_escape').split(':', 1)
60 extra[k] = v
59 extra[k] = v
61 return extra
60 return extra
62
61
63 def encodeextra(d):
62 def encodeextra(d):
64 # keys must be sorted to produce a deterministic changelog entry
63 # keys must be sorted to produce a deterministic changelog entry
65 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
64 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
66 return "\0".join(items)
65 return "\0".join(items)
67
66
68 def stripdesc(desc):
67 def stripdesc(desc):
69 """strip trailing whitespace and leading and trailing empty lines"""
68 """strip trailing whitespace and leading and trailing empty lines"""
70 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
69 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
71
70
72 class appender(object):
71 class appender(object):
73 '''the changelog index must be updated last on disk, so we use this class
72 '''the changelog index must be updated last on disk, so we use this class
74 to delay writes to it'''
73 to delay writes to it'''
75 def __init__(self, vfs, name, mode, buf):
74 def __init__(self, vfs, name, mode, buf):
76 self.data = buf
75 self.data = buf
77 fp = vfs(name, mode)
76 fp = vfs(name, mode)
78 self.fp = fp
77 self.fp = fp
79 self.offset = fp.tell()
78 self.offset = fp.tell()
80 self.size = vfs.fstat(fp).st_size
79 self.size = vfs.fstat(fp).st_size
81
80
82 def end(self):
81 def end(self):
83 return self.size + len("".join(self.data))
82 return self.size + len("".join(self.data))
84 def tell(self):
83 def tell(self):
85 return self.offset
84 return self.offset
86 def flush(self):
85 def flush(self):
87 pass
86 pass
88 def close(self):
87 def close(self):
89 self.fp.close()
88 self.fp.close()
90
89
91 def seek(self, offset, whence=0):
90 def seek(self, offset, whence=0):
92 '''virtual file offset spans real file and data'''
91 '''virtual file offset spans real file and data'''
93 if whence == 0:
92 if whence == 0:
94 self.offset = offset
93 self.offset = offset
95 elif whence == 1:
94 elif whence == 1:
96 self.offset += offset
95 self.offset += offset
97 elif whence == 2:
96 elif whence == 2:
98 self.offset = self.end() + offset
97 self.offset = self.end() + offset
99 if self.offset < self.size:
98 if self.offset < self.size:
100 self.fp.seek(self.offset)
99 self.fp.seek(self.offset)
101
100
102 def read(self, count=-1):
101 def read(self, count=-1):
103 '''only trick here is reads that span real file and data'''
102 '''only trick here is reads that span real file and data'''
104 ret = ""
103 ret = ""
105 if self.offset < self.size:
104 if self.offset < self.size:
106 s = self.fp.read(count)
105 s = self.fp.read(count)
107 ret = s
106 ret = s
108 self.offset += len(s)
107 self.offset += len(s)
109 if count > 0:
108 if count > 0:
110 count -= len(s)
109 count -= len(s)
111 if count != 0:
110 if count != 0:
112 doff = self.offset - self.size
111 doff = self.offset - self.size
113 self.data.insert(0, "".join(self.data))
112 self.data.insert(0, "".join(self.data))
114 del self.data[1:]
113 del self.data[1:]
115 s = self.data[0][doff:doff + count]
114 s = self.data[0][doff:doff + count]
116 self.offset += len(s)
115 self.offset += len(s)
117 ret += s
116 ret += s
118 return ret
117 return ret
119
118
120 def write(self, s):
119 def write(self, s):
121 self.data.append(str(s))
120 self.data.append(str(s))
122 self.offset += len(s)
121 self.offset += len(s)
123
122
124 def _divertopener(opener, target):
123 def _divertopener(opener, target):
125 """build an opener that writes in 'target.a' instead of 'target'"""
124 """build an opener that writes in 'target.a' instead of 'target'"""
126 def _divert(name, mode='r'):
125 def _divert(name, mode='r'):
127 if name != target:
126 if name != target:
128 return opener(name, mode)
127 return opener(name, mode)
129 return opener(name + ".a", mode)
128 return opener(name + ".a", mode)
130 return _divert
129 return _divert
131
130
132 def _delayopener(opener, target, buf):
131 def _delayopener(opener, target, buf):
133 """build an opener that stores chunks in 'buf' instead of 'target'"""
132 """build an opener that stores chunks in 'buf' instead of 'target'"""
134 def _delay(name, mode='r'):
133 def _delay(name, mode='r'):
135 if name != target:
134 if name != target:
136 return opener(name, mode)
135 return opener(name, mode)
137 return appender(opener, name, mode, buf)
136 return appender(opener, name, mode, buf)
138 return _delay
137 return _delay
139
138
140 class changelog(revlog.revlog):
139 class changelog(revlog.revlog):
141 def __init__(self, opener):
140 def __init__(self, opener):
142 revlog.revlog.__init__(self, opener, "00changelog.i")
141 revlog.revlog.__init__(self, opener, "00changelog.i")
143 if self._initempty:
142 if self._initempty:
144 # changelogs don't benefit from generaldelta
143 # changelogs don't benefit from generaldelta
145 self.version &= ~revlog.REVLOGGENERALDELTA
144 self.version &= ~revlog.REVLOGGENERALDELTA
146 self._generaldelta = False
145 self._generaldelta = False
147 self._realopener = opener
146 self._realopener = opener
148 self._delayed = False
147 self._delayed = False
149 self._delaybuf = None
148 self._delaybuf = None
150 self._divert = False
149 self._divert = False
151 self.filteredrevs = frozenset()
150 self.filteredrevs = frozenset()
152
151
153 def tip(self):
152 def tip(self):
154 """filtered version of revlog.tip"""
153 """filtered version of revlog.tip"""
155 for i in xrange(len(self) -1, -2, -1):
154 for i in xrange(len(self) -1, -2, -1):
156 if i not in self.filteredrevs:
155 if i not in self.filteredrevs:
157 return self.node(i)
156 return self.node(i)
158
157
159 def __contains__(self, rev):
158 def __contains__(self, rev):
160 """filtered version of revlog.__contains__"""
159 """filtered version of revlog.__contains__"""
161 return (0 <= rev < len(self)
160 return (0 <= rev < len(self)
162 and rev not in self.filteredrevs)
161 and rev not in self.filteredrevs)
163
162
164 def __iter__(self):
163 def __iter__(self):
165 """filtered version of revlog.__iter__"""
164 """filtered version of revlog.__iter__"""
166 if len(self.filteredrevs) == 0:
165 if len(self.filteredrevs) == 0:
167 return revlog.revlog.__iter__(self)
166 return revlog.revlog.__iter__(self)
168
167
169 def filterediter():
168 def filterediter():
170 for i in xrange(len(self)):
169 for i in xrange(len(self)):
171 if i not in self.filteredrevs:
170 if i not in self.filteredrevs:
172 yield i
171 yield i
173
172
174 return filterediter()
173 return filterediter()
175
174
176 def revs(self, start=0, stop=None):
175 def revs(self, start=0, stop=None):
177 """filtered version of revlog.revs"""
176 """filtered version of revlog.revs"""
178 for i in super(changelog, self).revs(start, stop):
177 for i in super(changelog, self).revs(start, stop):
179 if i not in self.filteredrevs:
178 if i not in self.filteredrevs:
180 yield i
179 yield i
181
180
182 @util.propertycache
181 @util.propertycache
183 def nodemap(self):
182 def nodemap(self):
184 # XXX need filtering too
183 # XXX need filtering too
185 self.rev(self.node(0))
184 self.rev(self.node(0))
186 return self._nodecache
185 return self._nodecache
187
186
     def reachableroots(self, minroot, heads, roots, includepath=False):
-        rroots = self.index.reachableroots2(minroot, heads, roots, includepath)
-        rroots = revset.baseset(rroots)
-        rroots.sort()
-        return rroots
+        return self.index.reachableroots2(minroot, heads, roots, includepath)

194 def headrevs(self):
190 def headrevs(self):
195 if self.filteredrevs:
191 if self.filteredrevs:
196 try:
192 try:
197 return self.index.headrevsfiltered(self.filteredrevs)
193 return self.index.headrevsfiltered(self.filteredrevs)
198 # AttributeError covers non-c-extension environments and
194 # AttributeError covers non-c-extension environments and
199 # old c extensions without filter handling.
195 # old c extensions without filter handling.
200 except AttributeError:
196 except AttributeError:
201 return self._headrevs()
197 return self._headrevs()
202
198
203 return super(changelog, self).headrevs()
199 return super(changelog, self).headrevs()
204
200
205 def strip(self, *args, **kwargs):
201 def strip(self, *args, **kwargs):
206 # XXX make something better than assert
202 # XXX make something better than assert
207 # We can't expect proper strip behavior if we are filtered.
203 # We can't expect proper strip behavior if we are filtered.
208 assert not self.filteredrevs
204 assert not self.filteredrevs
209 super(changelog, self).strip(*args, **kwargs)
205 super(changelog, self).strip(*args, **kwargs)
210
206
211 def rev(self, node):
207 def rev(self, node):
212 """filtered version of revlog.rev"""
208 """filtered version of revlog.rev"""
213 r = super(changelog, self).rev(node)
209 r = super(changelog, self).rev(node)
214 if r in self.filteredrevs:
210 if r in self.filteredrevs:
215 raise error.FilteredLookupError(hex(node), self.indexfile,
211 raise error.FilteredLookupError(hex(node), self.indexfile,
216 _('filtered node'))
212 _('filtered node'))
217 return r
213 return r
218
214
219 def node(self, rev):
215 def node(self, rev):
220 """filtered version of revlog.node"""
216 """filtered version of revlog.node"""
221 if rev in self.filteredrevs:
217 if rev in self.filteredrevs:
222 raise error.FilteredIndexError(rev)
218 raise error.FilteredIndexError(rev)
223 return super(changelog, self).node(rev)
219 return super(changelog, self).node(rev)
224
220
225 def linkrev(self, rev):
221 def linkrev(self, rev):
226 """filtered version of revlog.linkrev"""
222 """filtered version of revlog.linkrev"""
227 if rev in self.filteredrevs:
223 if rev in self.filteredrevs:
228 raise error.FilteredIndexError(rev)
224 raise error.FilteredIndexError(rev)
229 return super(changelog, self).linkrev(rev)
225 return super(changelog, self).linkrev(rev)
230
226
231 def parentrevs(self, rev):
227 def parentrevs(self, rev):
232 """filtered version of revlog.parentrevs"""
228 """filtered version of revlog.parentrevs"""
233 if rev in self.filteredrevs:
229 if rev in self.filteredrevs:
234 raise error.FilteredIndexError(rev)
230 raise error.FilteredIndexError(rev)
235 return super(changelog, self).parentrevs(rev)
231 return super(changelog, self).parentrevs(rev)
236
232
237 def flags(self, rev):
233 def flags(self, rev):
238 """filtered version of revlog.flags"""
234 """filtered version of revlog.flags"""
239 if rev in self.filteredrevs:
235 if rev in self.filteredrevs:
240 raise error.FilteredIndexError(rev)
236 raise error.FilteredIndexError(rev)
241 return super(changelog, self).flags(rev)
237 return super(changelog, self).flags(rev)
242
238
243 def delayupdate(self, tr):
239 def delayupdate(self, tr):
244 "delay visibility of index updates to other readers"
240 "delay visibility of index updates to other readers"
245
241
246 if not self._delayed:
242 if not self._delayed:
247 if len(self) == 0:
243 if len(self) == 0:
248 self._divert = True
244 self._divert = True
249 if self._realopener.exists(self.indexfile + '.a'):
245 if self._realopener.exists(self.indexfile + '.a'):
250 self._realopener.unlink(self.indexfile + '.a')
246 self._realopener.unlink(self.indexfile + '.a')
251 self.opener = _divertopener(self._realopener, self.indexfile)
247 self.opener = _divertopener(self._realopener, self.indexfile)
252 else:
248 else:
253 self._delaybuf = []
249 self._delaybuf = []
254 self.opener = _delayopener(self._realopener, self.indexfile,
250 self.opener = _delayopener(self._realopener, self.indexfile,
255 self._delaybuf)
251 self._delaybuf)
256 self._delayed = True
252 self._delayed = True
257 tr.addpending('cl-%i' % id(self), self._writepending)
253 tr.addpending('cl-%i' % id(self), self._writepending)
258 tr.addfinalize('cl-%i' % id(self), self._finalize)
254 tr.addfinalize('cl-%i' % id(self), self._finalize)
259
255
260 def _finalize(self, tr):
256 def _finalize(self, tr):
261 "finalize index updates"
257 "finalize index updates"
262 self._delayed = False
258 self._delayed = False
263 self.opener = self._realopener
259 self.opener = self._realopener
264 # move redirected index data back into place
260 # move redirected index data back into place
265 if self._divert:
261 if self._divert:
266 assert not self._delaybuf
262 assert not self._delaybuf
267 tmpname = self.indexfile + ".a"
263 tmpname = self.indexfile + ".a"
268 nfile = self.opener.open(tmpname)
264 nfile = self.opener.open(tmpname)
269 nfile.close()
265 nfile.close()
270 self.opener.rename(tmpname, self.indexfile)
266 self.opener.rename(tmpname, self.indexfile)
271 elif self._delaybuf:
267 elif self._delaybuf:
272 fp = self.opener(self.indexfile, 'a')
268 fp = self.opener(self.indexfile, 'a')
273 fp.write("".join(self._delaybuf))
269 fp.write("".join(self._delaybuf))
274 fp.close()
270 fp.close()
275 self._delaybuf = None
271 self._delaybuf = None
276 self._divert = False
272 self._divert = False
277 # split when we're done
273 # split when we're done
278 self.checkinlinesize(tr)
274 self.checkinlinesize(tr)
279
275
280 def readpending(self, file):
276 def readpending(self, file):
281 """read index data from a "pending" file
277 """read index data from a "pending" file
282
278
283 During a transaction, the actual changeset data is already stored in the
279 During a transaction, the actual changeset data is already stored in the
284 main file, but not yet finalized in the on-disk index. Instead, a
280 main file, but not yet finalized in the on-disk index. Instead, a
285 "pending" index is written by the transaction logic. If this function
281 "pending" index is written by the transaction logic. If this function
286 is running, we are likely in a subprocess invoked in a hook. The
282 is running, we are likely in a subprocess invoked in a hook. The
287 subprocess is informed that it is within a transaction and needs to
283 subprocess is informed that it is within a transaction and needs to
288 access its content.
284 access its content.
289
285
290 This function will read all the index data out of the pending file and
286 This function will read all the index data out of the pending file and
291 overwrite the main index."""
287 overwrite the main index."""
292
288
293 if not self.opener.exists(file):
289 if not self.opener.exists(file):
294 return # no pending data for changelog
290 return # no pending data for changelog
295 r = revlog.revlog(self.opener, file)
291 r = revlog.revlog(self.opener, file)
296 self.index = r.index
292 self.index = r.index
297 self.nodemap = r.nodemap
293 self.nodemap = r.nodemap
298 self._nodecache = r._nodecache
294 self._nodecache = r._nodecache
299 self._chunkcache = r._chunkcache
295 self._chunkcache = r._chunkcache
300
296
301 def _writepending(self, tr):
297 def _writepending(self, tr):
302 "create a file containing the unfinalized state for pretxnchangegroup"
298 "create a file containing the unfinalized state for pretxnchangegroup"
303 if self._delaybuf:
299 if self._delaybuf:
304 # make a temporary copy of the index
300 # make a temporary copy of the index
305 fp1 = self._realopener(self.indexfile)
301 fp1 = self._realopener(self.indexfile)
306 pendingfilename = self.indexfile + ".a"
302 pendingfilename = self.indexfile + ".a"
307 # register as a temp file to ensure cleanup on failure
303 # register as a temp file to ensure cleanup on failure
308 tr.registertmp(pendingfilename)
304 tr.registertmp(pendingfilename)
309 # write existing data
305 # write existing data
310 fp2 = self._realopener(pendingfilename, "w")
306 fp2 = self._realopener(pendingfilename, "w")
311 fp2.write(fp1.read())
307 fp2.write(fp1.read())
312 # add pending data
308 # add pending data
313 fp2.write("".join(self._delaybuf))
309 fp2.write("".join(self._delaybuf))
314 fp2.close()
310 fp2.close()
315 # switch modes so finalize can simply rename
311 # switch modes so finalize can simply rename
316 self._delaybuf = None
312 self._delaybuf = None
317 self._divert = True
313 self._divert = True
318 self.opener = _divertopener(self._realopener, self.indexfile)
314 self.opener = _divertopener(self._realopener, self.indexfile)
319
315
320 if self._divert:
316 if self._divert:
321 return True
317 return True
322
318
323 return False
319 return False
324
320
325 def checkinlinesize(self, tr, fp=None):
321 def checkinlinesize(self, tr, fp=None):
326 if not self._delayed:
322 if not self._delayed:
327 revlog.revlog.checkinlinesize(self, tr, fp)
323 revlog.revlog.checkinlinesize(self, tr, fp)
328
324
329 def read(self, node):
325 def read(self, node):
330 """
326 """
331 format used:
327 format used:
332 nodeid\n : manifest node in ascii
328 nodeid\n : manifest node in ascii
333 user\n : user, no \n or \r allowed
329 user\n : user, no \n or \r allowed
334 time tz extra\n : date (time is int or float, timezone is int)
330 time tz extra\n : date (time is int or float, timezone is int)
335 : extra is metadata, encoded and separated by '\0'
331 : extra is metadata, encoded and separated by '\0'
336 : older versions ignore it
332 : older versions ignore it
337 files\n\n : files modified by the cset, no \n or \r allowed
333 files\n\n : files modified by the cset, no \n or \r allowed
338 (.*) : comment (free text, ideally utf-8)
334 (.*) : comment (free text, ideally utf-8)
339
335
340 changelog v0 doesn't use extra
336 changelog v0 doesn't use extra
341 """
337 """
342 text = self.revision(node)
338 text = self.revision(node)
343 if not text:
339 if not text:
344 return (nullid, "", (0, 0), [], "", _defaultextra)
340 return (nullid, "", (0, 0), [], "", _defaultextra)
345 last = text.index("\n\n")
341 last = text.index("\n\n")
346 desc = encoding.tolocal(text[last + 2:])
342 desc = encoding.tolocal(text[last + 2:])
347 l = text[:last].split('\n')
343 l = text[:last].split('\n')
348 manifest = bin(l[0])
344 manifest = bin(l[0])
349 user = encoding.tolocal(l[1])
345 user = encoding.tolocal(l[1])
350
346
351 tdata = l[2].split(' ', 2)
347 tdata = l[2].split(' ', 2)
352 if len(tdata) != 3:
348 if len(tdata) != 3:
353 time = float(tdata[0])
349 time = float(tdata[0])
354 try:
350 try:
355 # various tools did silly things with the time zone field.
351 # various tools did silly things with the time zone field.
356 timezone = int(tdata[1])
352 timezone = int(tdata[1])
357 except ValueError:
353 except ValueError:
358 timezone = 0
354 timezone = 0
359 extra = _defaultextra
355 extra = _defaultextra
360 else:
356 else:
361 time, timezone = float(tdata[0]), int(tdata[1])
357 time, timezone = float(tdata[0]), int(tdata[1])
362 extra = decodeextra(tdata[2])
358 extra = decodeextra(tdata[2])
363
359
364 files = l[3:]
360 files = l[3:]
365 return (manifest, user, (time, timezone), files, desc, extra)
361 return (manifest, user, (time, timezone), files, desc, extra)
366
362
367 def add(self, manifest, files, desc, transaction, p1, p2,
363 def add(self, manifest, files, desc, transaction, p1, p2,
368 user, date=None, extra=None):
364 user, date=None, extra=None):
369 # Convert to UTF-8 encoded bytestrings as the very first
365 # Convert to UTF-8 encoded bytestrings as the very first
370 # thing: calling any method on a localstr object will turn it
366 # thing: calling any method on a localstr object will turn it
371 # into a str object and the cached UTF-8 string is thus lost.
367 # into a str object and the cached UTF-8 string is thus lost.
372 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
368 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
373
369
374 user = user.strip()
370 user = user.strip()
375 # An empty username or a username with a "\n" will make the
371 # An empty username or a username with a "\n" will make the
376 # revision text contain two "\n\n" sequences -> corrupt
372 # revision text contain two "\n\n" sequences -> corrupt
377 # repository since read cannot unpack the revision.
373 # repository since read cannot unpack the revision.
378 if not user:
374 if not user:
379 raise error.RevlogError(_("empty username"))
375 raise error.RevlogError(_("empty username"))
380 if "\n" in user:
376 if "\n" in user:
381 raise error.RevlogError(_("username %s contains a newline")
377 raise error.RevlogError(_("username %s contains a newline")
382 % repr(user))
378 % repr(user))
383
379
384 desc = stripdesc(desc)
380 desc = stripdesc(desc)
385
381
386 if date:
382 if date:
387 parseddate = "%d %d" % util.parsedate(date)
383 parseddate = "%d %d" % util.parsedate(date)
388 else:
384 else:
389 parseddate = "%d %d" % util.makedate()
385 parseddate = "%d %d" % util.makedate()
390 if extra:
386 if extra:
391 branch = extra.get("branch")
387 branch = extra.get("branch")
392 if branch in ("default", ""):
388 if branch in ("default", ""):
393 del extra["branch"]
389 del extra["branch"]
394 elif branch in (".", "null", "tip"):
390 elif branch in (".", "null", "tip"):
395 raise error.RevlogError(_('the name \'%s\' is reserved')
391 raise error.RevlogError(_('the name \'%s\' is reserved')
396 % branch)
392 % branch)
397 if extra:
393 if extra:
398 extra = encodeextra(extra)
394 extra = encodeextra(extra)
399 parseddate = "%s %s" % (parseddate, extra)
395 parseddate = "%s %s" % (parseddate, extra)
400 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
396 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
401 text = "\n".join(l)
397 text = "\n".join(l)
402 return self.addrevision(text, transaction, len(self), p1, p2)
398 return self.addrevision(text, transaction, len(self), p1, p2)
403
399
404 def branchinfo(self, rev):
400 def branchinfo(self, rev):
405 """return the branch name and open/close state of a revision
401 """return the branch name and open/close state of a revision
406
402
407 This function exists because creating a changectx object
403 This function exists because creating a changectx object
408 just to access this is costly."""
404 just to access this is costly."""
409 extra = self.read(rev)[5]
405 extra = self.read(rev)[5]
410 return encoding.tolocal(extra.get("branch")), 'close' in extra
406 return encoding.tolocal(extra.get("branch")), 'close' in extra
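On the changelog.py side the method body collapses to a single call and the now-unused revset import is dropped; callers such as revset.reachableroots() wrap and sort the result themselves. Condensed from the hunk above:

    # changelog.py -- return the raw index result, unwrapped and unsorted
    class changelog(revlog.revlog):
        def reachableroots(self, minroot, heads, roots, includepath=False):
            return self.index.reachableroots2(minroot, heads, roots, includepath)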
@@ -1,3722 +1,3723 @@ mercurial/revset.py
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 encoding,
15 encoding,
16 error,
16 error,
17 hbisect,
17 hbisect,
18 match as matchmod,
18 match as matchmod,
19 node,
19 node,
20 obsolete as obsmod,
20 obsolete as obsmod,
21 parser,
21 parser,
22 pathutil,
22 pathutil,
23 phases,
23 phases,
24 repoview,
24 repoview,
25 util,
25 util,
26 )
26 )
27
27
28 def _revancestors(repo, revs, followfirst):
28 def _revancestors(repo, revs, followfirst):
29 """Like revlog.ancestors(), but supports followfirst."""
29 """Like revlog.ancestors(), but supports followfirst."""
30 if followfirst:
30 if followfirst:
31 cut = 1
31 cut = 1
32 else:
32 else:
33 cut = None
33 cut = None
34 cl = repo.changelog
34 cl = repo.changelog
35
35
36 def iterate():
36 def iterate():
37 revs.sort(reverse=True)
37 revs.sort(reverse=True)
38 irevs = iter(revs)
38 irevs = iter(revs)
39 h = []
39 h = []
40
40
41 inputrev = next(irevs, None)
41 inputrev = next(irevs, None)
42 if inputrev is not None:
42 if inputrev is not None:
43 heapq.heappush(h, -inputrev)
43 heapq.heappush(h, -inputrev)
44
44
45 seen = set()
45 seen = set()
46 while h:
46 while h:
47 current = -heapq.heappop(h)
47 current = -heapq.heappop(h)
48 if current == inputrev:
48 if current == inputrev:
49 inputrev = next(irevs, None)
49 inputrev = next(irevs, None)
50 if inputrev is not None:
50 if inputrev is not None:
51 heapq.heappush(h, -inputrev)
51 heapq.heappush(h, -inputrev)
52 if current not in seen:
52 if current not in seen:
53 seen.add(current)
53 seen.add(current)
54 yield current
54 yield current
55 for parent in cl.parentrevs(current)[:cut]:
55 for parent in cl.parentrevs(current)[:cut]:
56 if parent != node.nullrev:
56 if parent != node.nullrev:
57 heapq.heappush(h, -parent)
57 heapq.heappush(h, -parent)
58
58
59 return generatorset(iterate(), iterasc=False)
59 return generatorset(iterate(), iterasc=False)
60
60
61 def _revdescendants(repo, revs, followfirst):
61 def _revdescendants(repo, revs, followfirst):
62 """Like revlog.descendants() but supports followfirst."""
62 """Like revlog.descendants() but supports followfirst."""
63 if followfirst:
63 if followfirst:
64 cut = 1
64 cut = 1
65 else:
65 else:
66 cut = None
66 cut = None
67
67
68 def iterate():
68 def iterate():
69 cl = repo.changelog
69 cl = repo.changelog
70 # XXX this should be 'parentset.min()' assuming 'parentset' is a
70 # XXX this should be 'parentset.min()' assuming 'parentset' is a
71 # smartset (and if it is not, it should.)
71 # smartset (and if it is not, it should.)
72 first = min(revs)
72 first = min(revs)
73 nullrev = node.nullrev
73 nullrev = node.nullrev
74 if first == nullrev:
74 if first == nullrev:
75 # Are there nodes with a null first parent and a non-null
75 # Are there nodes with a null first parent and a non-null
76 # second one? Maybe. Do we care? Probably not.
76 # second one? Maybe. Do we care? Probably not.
77 for i in cl:
77 for i in cl:
78 yield i
78 yield i
79 else:
79 else:
80 seen = set(revs)
80 seen = set(revs)
81 for i in cl.revs(first + 1):
81 for i in cl.revs(first + 1):
82 for x in cl.parentrevs(i)[:cut]:
82 for x in cl.parentrevs(i)[:cut]:
83 if x != nullrev and x in seen:
83 if x != nullrev and x in seen:
84 seen.add(i)
84 seen.add(i)
85 yield i
85 yield i
86 break
86 break
87
87
88 return generatorset(iterate(), iterasc=True)
88 return generatorset(iterate(), iterasc=True)
89
89
90 def reachablerootspure(repo, minroot, roots, heads, includepath):
90 def reachablerootspure(repo, minroot, roots, heads, includepath):
91 """return (heads(::<roots> and ::<heads>))
91 """return (heads(::<roots> and ::<heads>))
92
92
93 If includepath is True, return (<roots>::<heads>)."""
93 If includepath is True, return (<roots>::<heads>)."""
94 if not roots:
94 if not roots:
-        return baseset()
+        return []
96 parentrevs = repo.changelog.parentrevs
96 parentrevs = repo.changelog.parentrevs
97 roots = set(roots)
97 roots = set(roots)
98 visit = list(heads)
98 visit = list(heads)
99 reachable = set()
99 reachable = set()
100 seen = {}
100 seen = {}
101 # prefetch all the things! (because python is slow)
101 # prefetch all the things! (because python is slow)
102 reached = reachable.add
102 reached = reachable.add
103 dovisit = visit.append
103 dovisit = visit.append
104 nextvisit = visit.pop
104 nextvisit = visit.pop
105 # open-code the post-order traversal due to the tiny size of
105 # open-code the post-order traversal due to the tiny size of
106 # sys.getrecursionlimit()
106 # sys.getrecursionlimit()
107 while visit:
107 while visit:
108 rev = nextvisit()
108 rev = nextvisit()
109 if rev in roots:
109 if rev in roots:
110 reached(rev)
110 reached(rev)
111 if not includepath:
111 if not includepath:
112 continue
112 continue
113 parents = parentrevs(rev)
113 parents = parentrevs(rev)
114 seen[rev] = parents
114 seen[rev] = parents
115 for parent in parents:
115 for parent in parents:
116 if parent >= minroot and parent not in seen:
116 if parent >= minroot and parent not in seen:
117 dovisit(parent)
117 dovisit(parent)
118 if not reachable:
118 if not reachable:
119 return baseset()
119 return baseset()
120 if not includepath:
120 if not includepath:
121 return reachable
121 return reachable
122 for rev in sorted(seen):
122 for rev in sorted(seen):
123 for parent in seen[rev]:
123 for parent in seen[rev]:
124 if parent in reachable:
124 if parent in reachable:
125 reached(rev)
125 reached(rev)
-    reachable = baseset(reachable)
-    reachable.sort()
-    return reachable
+    return reachable
129
127
130 def reachableroots(repo, roots, heads, includepath=False):
128 def reachableroots(repo, roots, heads, includepath=False):
131 """return (heads(::<roots> and ::<heads>))
129 """return (heads(::<roots> and ::<heads>))
132
130
133 If includepath is True, return (<roots>::<heads>)."""
131 If includepath is True, return (<roots>::<heads>)."""
134 if not roots:
132 if not roots:
135 return baseset()
133 return baseset()
136 minroot = roots.min()
134 minroot = roots.min()
137 roots = list(roots)
135 roots = list(roots)
138 heads = list(heads)
136 heads = list(heads)
     try:
-        return repo.changelog.reachableroots(minroot, heads, roots, includepath)
+        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
     except AttributeError:
-        return reachablerootspure(repo, minroot, roots, heads, includepath)
+        revs = reachablerootspure(repo, minroot, roots, heads, includepath)
+    revs = baseset(revs)
+    revs.sort()
+    return revs
143
144
144 elements = {
145 elements = {
145 # token-type: binding-strength, primary, prefix, infix, suffix
146 # token-type: binding-strength, primary, prefix, infix, suffix
146 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
147 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
147 "##": (20, None, None, ("_concat", 20), None),
148 "##": (20, None, None, ("_concat", 20), None),
148 "~": (18, None, None, ("ancestor", 18), None),
149 "~": (18, None, None, ("ancestor", 18), None),
149 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
150 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
150 "-": (5, None, ("negate", 19), ("minus", 5), None),
151 "-": (5, None, ("negate", 19), ("minus", 5), None),
151 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
152 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
152 ("dagrangepost", 17)),
153 ("dagrangepost", 17)),
153 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
154 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
154 ("dagrangepost", 17)),
155 ("dagrangepost", 17)),
155 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
156 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
156 "not": (10, None, ("not", 10), None, None),
157 "not": (10, None, ("not", 10), None, None),
157 "!": (10, None, ("not", 10), None, None),
158 "!": (10, None, ("not", 10), None, None),
158 "and": (5, None, None, ("and", 5), None),
159 "and": (5, None, None, ("and", 5), None),
159 "&": (5, None, None, ("and", 5), None),
160 "&": (5, None, None, ("and", 5), None),
160 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
161 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
161 "or": (4, None, None, ("or", 4), None),
162 "or": (4, None, None, ("or", 4), None),
162 "|": (4, None, None, ("or", 4), None),
163 "|": (4, None, None, ("or", 4), None),
163 "+": (4, None, None, ("or", 4), None),
164 "+": (4, None, None, ("or", 4), None),
164 "=": (3, None, None, ("keyvalue", 3), None),
165 "=": (3, None, None, ("keyvalue", 3), None),
165 ",": (2, None, None, ("list", 2), None),
166 ",": (2, None, None, ("list", 2), None),
166 ")": (0, None, None, None, None),
167 ")": (0, None, None, None, None),
167 "symbol": (0, "symbol", None, None, None),
168 "symbol": (0, "symbol", None, None, None),
168 "string": (0, "string", None, None, None),
169 "string": (0, "string", None, None, None),
169 "end": (0, None, None, None, None),
170 "end": (0, None, None, None, None),
170 }
171 }
171
172
172 keywords = set(['and', 'or', 'not'])
173 keywords = set(['and', 'or', 'not'])
173
174
174 # default set of valid characters for the initial letter of symbols
175 # default set of valid characters for the initial letter of symbols
175 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
176 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
176 if c.isalnum() or c in '._@' or ord(c) > 127)
177 if c.isalnum() or c in '._@' or ord(c) > 127)
177
178
178 # default set of valid characters for non-initial letters of symbols
179 # default set of valid characters for non-initial letters of symbols
179 _symletters = set(c for c in [chr(i) for i in xrange(256)]
180 _symletters = set(c for c in [chr(i) for i in xrange(256)]
180 if c.isalnum() or c in '-._/@' or ord(c) > 127)
181 if c.isalnum() or c in '-._/@' or ord(c) > 127)
181
182
182 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
183 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
183 '''
184 '''
184 Parse a revset statement into a stream of tokens
185 Parse a revset statement into a stream of tokens
185
186
186 ``syminitletters`` is the set of valid characters for the initial
187 ``syminitletters`` is the set of valid characters for the initial
187 letter of symbols.
188 letter of symbols.
188
189
189 By default, character ``c`` is recognized as valid for initial
190 By default, character ``c`` is recognized as valid for initial
190 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
191 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
191
192
192 ``symletters`` is the set of valid characters for non-initial
193 ``symletters`` is the set of valid characters for non-initial
193 letters of symbols.
194 letters of symbols.
194
195
195 By default, character ``c`` is recognized as valid for non-initial
196 By default, character ``c`` is recognized as valid for non-initial
196 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
197 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
197
198
198 Check that @ is a valid unquoted token character (issue3686):
199 Check that @ is a valid unquoted token character (issue3686):
199 >>> list(tokenize("@::"))
200 >>> list(tokenize("@::"))
200 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
201 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
201
202
202 '''
203 '''
203 if syminitletters is None:
204 if syminitletters is None:
204 syminitletters = _syminitletters
205 syminitletters = _syminitletters
205 if symletters is None:
206 if symletters is None:
206 symletters = _symletters
207 symletters = _symletters
207
208
208 if program and lookup:
209 if program and lookup:
209 # attempt to parse old-style ranges first to deal with
210 # attempt to parse old-style ranges first to deal with
210 # things like old-tag which contain query metacharacters
211 # things like old-tag which contain query metacharacters
211 parts = program.split(':', 1)
212 parts = program.split(':', 1)
212 if all(lookup(sym) for sym in parts if sym):
213 if all(lookup(sym) for sym in parts if sym):
213 if parts[0]:
214 if parts[0]:
214 yield ('symbol', parts[0], 0)
215 yield ('symbol', parts[0], 0)
215 if len(parts) > 1:
216 if len(parts) > 1:
216 s = len(parts[0])
217 s = len(parts[0])
217 yield (':', None, s)
218 yield (':', None, s)
218 if parts[1]:
219 if parts[1]:
219 yield ('symbol', parts[1], s + 1)
220 yield ('symbol', parts[1], s + 1)
220 yield ('end', None, len(program))
221 yield ('end', None, len(program))
221 return
222 return
222
223
223 pos, l = 0, len(program)
224 pos, l = 0, len(program)
224 while pos < l:
225 while pos < l:
225 c = program[pos]
226 c = program[pos]
226 if c.isspace(): # skip inter-token whitespace
227 if c.isspace(): # skip inter-token whitespace
227 pass
228 pass
228 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
229 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
229 yield ('::', None, pos)
230 yield ('::', None, pos)
230 pos += 1 # skip ahead
231 pos += 1 # skip ahead
231 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
232 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
232 yield ('..', None, pos)
233 yield ('..', None, pos)
233 pos += 1 # skip ahead
234 pos += 1 # skip ahead
234 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
235 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
235 yield ('##', None, pos)
236 yield ('##', None, pos)
236 pos += 1 # skip ahead
237 pos += 1 # skip ahead
237 elif c in "():=,-|&+!~^%": # handle simple operators
238 elif c in "():=,-|&+!~^%": # handle simple operators
238 yield (c, None, pos)
239 yield (c, None, pos)
239 elif (c in '"\'' or c == 'r' and
240 elif (c in '"\'' or c == 'r' and
240 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
241 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
241 if c == 'r':
242 if c == 'r':
242 pos += 1
243 pos += 1
243 c = program[pos]
244 c = program[pos]
244 decode = lambda x: x
245 decode = lambda x: x
245 else:
246 else:
246 decode = lambda x: x.decode('string-escape')
247 decode = lambda x: x.decode('string-escape')
247 pos += 1
248 pos += 1
248 s = pos
249 s = pos
249 while pos < l: # find closing quote
250 while pos < l: # find closing quote
250 d = program[pos]
251 d = program[pos]
251 if d == '\\': # skip over escaped characters
252 if d == '\\': # skip over escaped characters
252 pos += 2
253 pos += 2
253 continue
254 continue
254 if d == c:
255 if d == c:
255 yield ('string', decode(program[s:pos]), s)
256 yield ('string', decode(program[s:pos]), s)
256 break
257 break
257 pos += 1
258 pos += 1
258 else:
259 else:
259 raise error.ParseError(_("unterminated string"), s)
260 raise error.ParseError(_("unterminated string"), s)
260 # gather up a symbol/keyword
261 # gather up a symbol/keyword
261 elif c in syminitletters:
262 elif c in syminitletters:
262 s = pos
263 s = pos
263 pos += 1
264 pos += 1
264 while pos < l: # find end of symbol
265 while pos < l: # find end of symbol
265 d = program[pos]
266 d = program[pos]
266 if d not in symletters:
267 if d not in symletters:
267 break
268 break
268 if d == '.' and program[pos - 1] == '.': # special case for ..
269 if d == '.' and program[pos - 1] == '.': # special case for ..
269 pos -= 1
270 pos -= 1
270 break
271 break
271 pos += 1
272 pos += 1
272 sym = program[s:pos]
273 sym = program[s:pos]
273 if sym in keywords: # operator keywords
274 if sym in keywords: # operator keywords
274 yield (sym, None, s)
275 yield (sym, None, s)
275 elif '-' in sym:
276 elif '-' in sym:
276 # some jerk gave us foo-bar-baz, try to check if it's a symbol
277 # some jerk gave us foo-bar-baz, try to check if it's a symbol
277 if lookup and lookup(sym):
278 if lookup and lookup(sym):
278 # looks like a real symbol
279 # looks like a real symbol
279 yield ('symbol', sym, s)
280 yield ('symbol', sym, s)
280 else:
281 else:
281 # looks like an expression
282 # looks like an expression
282 parts = sym.split('-')
283 parts = sym.split('-')
283 for p in parts[:-1]:
284 for p in parts[:-1]:
284 if p: # possible consecutive -
285 if p: # possible consecutive -
285 yield ('symbol', p, s)
286 yield ('symbol', p, s)
286 s += len(p)
287 s += len(p)
287 yield ('-', None, pos)
288 yield ('-', None, pos)
288 s += 1
289 s += 1
289 if parts[-1]: # possible trailing -
290 if parts[-1]: # possible trailing -
290 yield ('symbol', parts[-1], s)
291 yield ('symbol', parts[-1], s)
291 else:
292 else:
292 yield ('symbol', sym, s)
293 yield ('symbol', sym, s)
293 pos -= 1
294 pos -= 1
294 else:
295 else:
295 raise error.ParseError(_("syntax error in revset '%s'") %
296 raise error.ParseError(_("syntax error in revset '%s'") %
296 program, pos)
297 program, pos)
297 pos += 1
298 pos += 1
298 yield ('end', None, pos)
299 yield ('end', None, pos)
299
300
300 def parseerrordetail(inst):
301 def parseerrordetail(inst):
301 """Compose error message from specified ParseError object
302 """Compose error message from specified ParseError object
302 """
303 """
303 if len(inst.args) > 1:
304 if len(inst.args) > 1:
304 return _('at %s: %s') % (inst.args[1], inst.args[0])
305 return _('at %s: %s') % (inst.args[1], inst.args[0])
305 else:
306 else:
306 return inst.args[0]
307 return inst.args[0]
307
308
308 # helpers
309 # helpers
309
310
310 def getstring(x, err):
311 def getstring(x, err):
311 if x and (x[0] == 'string' or x[0] == 'symbol'):
312 if x and (x[0] == 'string' or x[0] == 'symbol'):
312 return x[1]
313 return x[1]
313 raise error.ParseError(err)
314 raise error.ParseError(err)
314
315
315 def getlist(x):
316 def getlist(x):
316 if not x:
317 if not x:
317 return []
318 return []
318 if x[0] == 'list':
319 if x[0] == 'list':
319 return getlist(x[1]) + [x[2]]
320 return getlist(x[1]) + [x[2]]
320 return [x]
321 return [x]
321
322
322 def getargs(x, min, max, err):
323 def getargs(x, min, max, err):
323 l = getlist(x)
324 l = getlist(x)
324 if len(l) < min or (max >= 0 and len(l) > max):
325 if len(l) < min or (max >= 0 and len(l) > max):
325 raise error.ParseError(err)
326 raise error.ParseError(err)
326 return l
327 return l
327
328
328 def getargsdict(x, funcname, keys):
329 def getargsdict(x, funcname, keys):
329 return parser.buildargsdict(getlist(x), funcname, keys.split(),
330 return parser.buildargsdict(getlist(x), funcname, keys.split(),
330 keyvaluenode='keyvalue', keynode='symbol')
331 keyvaluenode='keyvalue', keynode='symbol')
331
332
332 def isvalidsymbol(tree):
333 def isvalidsymbol(tree):
333 """Examine whether specified ``tree`` is valid ``symbol`` or not
334 """Examine whether specified ``tree`` is valid ``symbol`` or not
334 """
335 """
335 return tree[0] == 'symbol' and len(tree) > 1
336 return tree[0] == 'symbol' and len(tree) > 1
336
337
337 def getsymbol(tree):
338 def getsymbol(tree):
338 """Get symbol name from valid ``symbol`` in ``tree``
339 """Get symbol name from valid ``symbol`` in ``tree``
339
340
340 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
341 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
341 """
342 """
342 return tree[1]
343 return tree[1]
343
344
344 def isvalidfunc(tree):
345 def isvalidfunc(tree):
345 """Examine whether specified ``tree`` is valid ``func`` or not
346 """Examine whether specified ``tree`` is valid ``func`` or not
346 """
347 """
347 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
348 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
348
349
349 def getfuncname(tree):
350 def getfuncname(tree):
350 """Get function name from valid ``func`` in ``tree``
351 """Get function name from valid ``func`` in ``tree``
351
352
352 This assumes that ``tree`` is already examined by ``isvalidfunc``.
353 This assumes that ``tree`` is already examined by ``isvalidfunc``.
353 """
354 """
354 return getsymbol(tree[1])
355 return getsymbol(tree[1])
355
356
356 def getfuncargs(tree):
357 def getfuncargs(tree):
357 """Get list of function arguments from valid ``func`` in ``tree``
358 """Get list of function arguments from valid ``func`` in ``tree``
358
359
359 This assumes that ``tree`` is already examined by ``isvalidfunc``.
360 This assumes that ``tree`` is already examined by ``isvalidfunc``.
360 """
361 """
361 if len(tree) > 2:
362 if len(tree) > 2:
362 return getlist(tree[2])
363 return getlist(tree[2])
363 else:
364 else:
364 return []
365 return []
365
366
366 def getset(repo, subset, x):
367 def getset(repo, subset, x):
367 if not x:
368 if not x:
368 raise error.ParseError(_("missing argument"))
369 raise error.ParseError(_("missing argument"))
369 s = methods[x[0]](repo, subset, *x[1:])
370 s = methods[x[0]](repo, subset, *x[1:])
370 if util.safehasattr(s, 'isascending'):
371 if util.safehasattr(s, 'isascending'):
371 return s
372 return s
372 if (repo.ui.configbool('devel', 'all-warnings')
373 if (repo.ui.configbool('devel', 'all-warnings')
373 or repo.ui.configbool('devel', 'old-revset')):
374 or repo.ui.configbool('devel', 'old-revset')):
374 # else case should not happen, because all non-func are internal,
375 # else case should not happen, because all non-func are internal,
375 # ignoring for now.
376 # ignoring for now.
376 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
377 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
377 repo.ui.develwarn('revset "%s" use list instead of smartset, '
378 repo.ui.develwarn('revset "%s" use list instead of smartset, '
378 '(upgrade your code)' % x[1][1])
379 '(upgrade your code)' % x[1][1])
379 return baseset(s)
380 return baseset(s)
380
381
381 def _getrevsource(repo, r):
382 def _getrevsource(repo, r):
382 extra = repo[r].extra()
383 extra = repo[r].extra()
383 for label in ('source', 'transplant_source', 'rebase_source'):
384 for label in ('source', 'transplant_source', 'rebase_source'):
384 if label in extra:
385 if label in extra:
385 try:
386 try:
386 return repo[extra[label]].rev()
387 return repo[extra[label]].rev()
387 except error.RepoLookupError:
388 except error.RepoLookupError:
388 pass
389 pass
389 return None
390 return None
390
391
391 # operator methods
392 # operator methods
392
393
393 def stringset(repo, subset, x):
394 def stringset(repo, subset, x):
394 x = repo[x].rev()
395 x = repo[x].rev()
395 if (x in subset
396 if (x in subset
396 or x == node.nullrev and isinstance(subset, fullreposet)):
397 or x == node.nullrev and isinstance(subset, fullreposet)):
397 return baseset([x])
398 return baseset([x])
398 return baseset()
399 return baseset()
399
400
400 def rangeset(repo, subset, x, y):
401 def rangeset(repo, subset, x, y):
401 m = getset(repo, fullreposet(repo), x)
402 m = getset(repo, fullreposet(repo), x)
402 n = getset(repo, fullreposet(repo), y)
403 n = getset(repo, fullreposet(repo), y)
403
404
404 if not m or not n:
405 if not m or not n:
405 return baseset()
406 return baseset()
406 m, n = m.first(), n.last()
407 m, n = m.first(), n.last()
407
408
408 if m == n:
409 if m == n:
409 r = baseset([m])
410 r = baseset([m])
410 elif n == node.wdirrev:
411 elif n == node.wdirrev:
411 r = spanset(repo, m, len(repo)) + baseset([n])
412 r = spanset(repo, m, len(repo)) + baseset([n])
412 elif m == node.wdirrev:
413 elif m == node.wdirrev:
413 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
414 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
414 elif m < n:
415 elif m < n:
415 r = spanset(repo, m, n + 1)
416 r = spanset(repo, m, n + 1)
416 else:
417 else:
417 r = spanset(repo, m, n - 1)
418 r = spanset(repo, m, n - 1)
418 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
419 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
419 # necessary to ensure we preserve the order in subset.
420 # necessary to ensure we preserve the order in subset.
420 #
421 #
421 # This has performance implication, carrying the sorting over when possible
422 # This has performance implication, carrying the sorting over when possible
422 # would be more efficient.
423 # would be more efficient.
423 return r & subset
424 return r & subset
424
425
425 def dagrange(repo, subset, x, y):
426 def dagrange(repo, subset, x, y):
426 r = fullreposet(repo)
427 r = fullreposet(repo)
427 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
428 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
428 includepath=True)
429 includepath=True)
429 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
430 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
430 # necessary to ensure we preserve the order in subset.
431 # necessary to ensure we preserve the order in subset.
431 return xs & subset
432 return xs & subset
432
433
433 def andset(repo, subset, x, y):
434 def andset(repo, subset, x, y):
434 return getset(repo, getset(repo, subset, x), y)
435 return getset(repo, getset(repo, subset, x), y)
435
436
436 def orset(repo, subset, *xs):
437 def orset(repo, subset, *xs):
437 assert xs
438 assert xs
438 if len(xs) == 1:
439 if len(xs) == 1:
439 return getset(repo, subset, xs[0])
440 return getset(repo, subset, xs[0])
440 p = len(xs) // 2
441 p = len(xs) // 2
441 a = orset(repo, subset, *xs[:p])
442 a = orset(repo, subset, *xs[:p])
442 b = orset(repo, subset, *xs[p:])
443 b = orset(repo, subset, *xs[p:])
443 return a + b
444 return a + b
444
445
445 def notset(repo, subset, x):
446 def notset(repo, subset, x):
446 return subset - getset(repo, subset, x)
447 return subset - getset(repo, subset, x)
447
448
448 def listset(repo, subset, a, b):
449 def listset(repo, subset, a, b):
449 raise error.ParseError(_("can't use a list in this context"))
450 raise error.ParseError(_("can't use a list in this context"))
450
451
451 def keyvaluepair(repo, subset, k, v):
452 def keyvaluepair(repo, subset, k, v):
452 raise error.ParseError(_("can't use a key-value pair in this context"))
453 raise error.ParseError(_("can't use a key-value pair in this context"))
453
454
454 def func(repo, subset, a, b):
455 def func(repo, subset, a, b):
455 if a[0] == 'symbol' and a[1] in symbols:
456 if a[0] == 'symbol' and a[1] in symbols:
456 return symbols[a[1]](repo, subset, b)
457 return symbols[a[1]](repo, subset, b)
457
458
458 keep = lambda fn: getattr(fn, '__doc__', None) is not None
459 keep = lambda fn: getattr(fn, '__doc__', None) is not None
459
460
460 syms = [s for (s, fn) in symbols.items() if keep(fn)]
461 syms = [s for (s, fn) in symbols.items() if keep(fn)]
461 raise error.UnknownIdentifier(a[1], syms)
462 raise error.UnknownIdentifier(a[1], syms)
462
463
463 # functions
464 # functions
464
465
465 def adds(repo, subset, x):
466 def adds(repo, subset, x):
466 """``adds(pattern)``
467 """``adds(pattern)``
467 Changesets that add a file matching pattern.
468 Changesets that add a file matching pattern.
468
469
469 The pattern without explicit kind like ``glob:`` is expected to be
470 The pattern without explicit kind like ``glob:`` is expected to be
470 relative to the current directory and match against a file or a
471 relative to the current directory and match against a file or a
471 directory.
472 directory.
472 """
473 """
473 # i18n: "adds" is a keyword
474 # i18n: "adds" is a keyword
474 pat = getstring(x, _("adds requires a pattern"))
475 pat = getstring(x, _("adds requires a pattern"))
475 return checkstatus(repo, subset, pat, 1)
476 return checkstatus(repo, subset, pat, 1)
476
477
477 def ancestor(repo, subset, x):
478 def ancestor(repo, subset, x):
478 """``ancestor(*changeset)``
479 """``ancestor(*changeset)``
479 A greatest common ancestor of the changesets.
480 A greatest common ancestor of the changesets.
480
481
481 Accepts 0 or more changesets.
482 Accepts 0 or more changesets.
482 Will return empty list when passed no args.
483 Will return empty list when passed no args.
483 Greatest common ancestor of a single changeset is that changeset.
484 Greatest common ancestor of a single changeset is that changeset.
484 """
485 """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
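    # Usage sketch (illustrative): ".~2" is the second first-parent ancestor of
    # the working directory parent, i.e. the same revision as "p1(p1(.))".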
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))

def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
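    # Usage sketch (illustrative): during a bisection,
    #   hg log -r "bisect(untested)"
    # shows the changesets whose state has not been decided yet.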
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
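    # Usage sketch (illustrative): "bookmark()" selects every bookmarked
    # revision, while "bookmark('re:^release-')" selects the revisions whose
    # bookmark names start with "release-".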
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
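    # Usage sketch (illustrative): "branch(default)" selects every changeset on
    # the default branch, and "branch(.)" selects all changesets on the same
    # branch as the working directory parent.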
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)

def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
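    # Shared helper for status-based predicates such as adds() and modifies():
    # 'field' indexes into the (modified, added, removed, ...) tuple returned
    # by repo.status(), so 0 selects modified files and 1 added files.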
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)

def _children(repo, narrow, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)

def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
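    # Usage sketch (illustrative): "children(.)" lists the immediate children
    # of the working directory parent.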
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())

def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
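    # Usage sketch (illustrative): "contains('LICENSE')" selects every revision
    # whose manifest includes the file LICENSE, even if the revision does not
    # touch it, whereas "file('LICENSE')" only matches revisions changing it.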
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)

def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))

def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
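    # Usage sketch (illustrative): "date('2015-06-12')" matches changesets
    # committed on that day, and "date('>2015-01-01')" those committed after
    # it; see 'hg help dates' for the full interval syntax.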
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))

def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)

def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))

def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s

def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            return baseset()
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')

def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
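    # Usage sketch (illustrative): "grep(r'bug\d+')" matches changesets whose
    # description, user name or changed file names contain "bug" followed by
    # digits, case-sensitively; "keyword(bug)" would do a case-insensitive
    # substring match instead.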
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches)

def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)

def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset

def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches)

def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
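    # Usage sketch (illustrative): "limit(branch(default), 3)" yields the first
    # three revisions of that set; "first(set, [n])" is an alias for the same
    # behaviour, and "last(set, [n])" picks from the other end.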
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)

def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)

def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if os:
        m = os.max()
        if m in subset:
            return baseset([m])
    return baseset()

def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)

def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)

1310 def minrev(repo, subset, x):
1311 def minrev(repo, subset, x):
1311 """``min(set)``
1312 """``min(set)``
1312 Changeset with lowest revision number in set.
1313 Changeset with lowest revision number in set.
1313 """
1314 """
1314 os = getset(repo, fullreposet(repo), x)
1315 os = getset(repo, fullreposet(repo), x)
1315 if os:
1316 if os:
1316 m = os.min()
1317 m = os.min()
1317 if m in subset:
1318 if m in subset:
1318 return baseset([m])
1319 return baseset([m])
1319 return baseset()
1320 return baseset()
1320
1321
1321 def modifies(repo, subset, x):
1322 def modifies(repo, subset, x):
1322 """``modifies(pattern)``
1323 """``modifies(pattern)``
1323 Changesets modifying files matched by pattern.
1324 Changesets modifying files matched by pattern.
1324
1325
1325 The pattern without explicit kind like ``glob:`` is expected to be
1326 The pattern without explicit kind like ``glob:`` is expected to be
1326 relative to the current directory and match against a file or a
1327 relative to the current directory and match against a file or a
1327 directory.
1328 directory.
1328 """
1329 """
1329 # i18n: "modifies" is a keyword
1330 # i18n: "modifies" is a keyword
1330 pat = getstring(x, _("modifies requires a pattern"))
1331 pat = getstring(x, _("modifies requires a pattern"))
1331 return checkstatus(repo, subset, pat, 0)
1332 return checkstatus(repo, subset, pat, 0)
1332
1333
1333 def named(repo, subset, x):
1334 def named(repo, subset, x):
1334 """``named(namespace)``
1335 """``named(namespace)``
1335 The changesets in a given namespace.
1336 The changesets in a given namespace.
1336
1337
1337 If `namespace` starts with `re:`, the remainder of the string is treated as
1338 If `namespace` starts with `re:`, the remainder of the string is treated as
1338 a regular expression. To match a namespace that actually starts with `re:`,
1339 a regular expression. To match a namespace that actually starts with `re:`,
1339 use the prefix `literal:`.
1340 use the prefix `literal:`.
1340 """
1341 """
1341 # i18n: "named" is a keyword
1342 # i18n: "named" is a keyword
1342 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1343 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1343
1344
1344 ns = getstring(args[0],
1345 ns = getstring(args[0],
1345 # i18n: "named" is a keyword
1346 # i18n: "named" is a keyword
1346 _('the argument to named must be a string'))
1347 _('the argument to named must be a string'))
1347 kind, pattern, matcher = _stringmatcher(ns)
1348 kind, pattern, matcher = _stringmatcher(ns)
1348 namespaces = set()
1349 namespaces = set()
1349 if kind == 'literal':
1350 if kind == 'literal':
1350 if pattern not in repo.names:
1351 if pattern not in repo.names:
1351 raise error.RepoLookupError(_("namespace '%s' does not exist")
1352 raise error.RepoLookupError(_("namespace '%s' does not exist")
1352 % ns)
1353 % ns)
1353 namespaces.add(repo.names[pattern])
1354 namespaces.add(repo.names[pattern])
1354 else:
1355 else:
1355 for name, ns in repo.names.iteritems():
1356 for name, ns in repo.names.iteritems():
1356 if matcher(name):
1357 if matcher(name):
1357 namespaces.add(ns)
1358 namespaces.add(ns)
1358 if not namespaces:
1359 if not namespaces:
1359 raise error.RepoLookupError(_("no namespace exists"
1360 raise error.RepoLookupError(_("no namespace exists"
1360 " that match '%s'") % pattern)
1361 " that match '%s'") % pattern)
1361
1362
1362 names = set()
1363 names = set()
1363 for ns in namespaces:
1364 for ns in namespaces:
1364 for name in ns.listnames(repo):
1365 for name in ns.listnames(repo):
1365 if name not in ns.deprecated:
1366 if name not in ns.deprecated:
1366 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1367 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1367
1368
1368 names -= set([node.nullrev])
1369 names -= set([node.nullrev])
1369 return subset & names
1370 return subset & names
1370
1371
1371 def node_(repo, subset, x):
1372 def node_(repo, subset, x):
1372 """``id(string)``
1373 """``id(string)``
1373 Revision non-ambiguously specified by the given hex string prefix.
1374 Revision non-ambiguously specified by the given hex string prefix.
1374 """
1375 """
1375 # i18n: "id" is a keyword
1376 # i18n: "id" is a keyword
1376 l = getargs(x, 1, 1, _("id requires one argument"))
1377 l = getargs(x, 1, 1, _("id requires one argument"))
1377 # i18n: "id" is a keyword
1378 # i18n: "id" is a keyword
1378 n = getstring(l[0], _("id requires a string"))
1379 n = getstring(l[0], _("id requires a string"))
1379 if len(n) == 40:
1380 if len(n) == 40:
1380 try:
1381 try:
1381 rn = repo.changelog.rev(node.bin(n))
1382 rn = repo.changelog.rev(node.bin(n))
1382 except (LookupError, TypeError):
1383 except (LookupError, TypeError):
1383 rn = None
1384 rn = None
1384 else:
1385 else:
1385 rn = None
1386 rn = None
1386 pm = repo.changelog._partialmatch(n)
1387 pm = repo.changelog._partialmatch(n)
1387 if pm is not None:
1388 if pm is not None:
1388 rn = repo.changelog.rev(pm)
1389 rn = repo.changelog.rev(pm)
1389
1390
1390 if rn is None:
1391 if rn is None:
1391 return baseset()
1392 return baseset()
1392 result = baseset([rn])
1393 result = baseset([rn])
1393 return result & subset
1394 return result & subset
1394
1395
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

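        # Illustrative note (added, not from the original source): with a
        # single argument, "only(X)" excludes everything reachable from any
        # other repository head, so a query such as
        # 'hg log -r "only(feature-branch)"' would show only the changesets
        # unique to that branch (the branch name here is hypothetical).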
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results

def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

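    # Editorial note: _firstsrc follows the source recorded for each revision
    # (graft, transplant and rebase note where a changeset was copied from)
    # backwards until the chain ends, so a graft of a graft resolves to the
    # original changeset.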
    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o

def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
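    # Editorial note: the discovery step below talks to the remote peer; the
    # ui output is buffered and discarded so that evaluating this revset stays
    # quiet even though it performs network I/O.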
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
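        # Editorial note: the working directory pseudo-revision (node.wdirrev)
        # has no entry in the changelog, so its parents must be read from the
        # context object instead of parentrevs().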
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps

def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
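    # Editorial note: when the phase cache exposes precomputed per-phase rev
    # sets we intersect with them directly (fast path); otherwise we fall back
    # to filtering the subset lazily with a per-revision phase lookup.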
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort()  # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, cache=False)

def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
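    # Illustrative examples (added, not part of the original docstring):
    # 'tip^' and 'tip^1' select the first parent of tip, 'merge()^2' the
    # second parents of all merge revisions, and 'tip^0' is tip itself.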
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)

def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, cache=False)

def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

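    # Editorial note: matches() below treats a candidate as a match when, for
    # at least one revision in `revs`, every selected field compares equal to
    # the candidate's corresponding field.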
    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)

def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l

def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter)

def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
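    # Editorial note: invert() maps every byte c to chr(255 - ord(c)), so
    # sorting the inverted strings in ascending order yields the original
    # strings in descending order; this is how the '-branch', '-desc' and
    # '-user' keys are handled without a separate reverse sort per key.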
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])

def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

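    # Editorial note: subrepo state is recorded in the .hgsubstate file, so
    # the status of that single file between a changeset and its first parent
    # is what signals that a subrepository was added, modified or removed.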
    def submatches(names):
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)

def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__

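# Editorial note: _substringmatcher reuses _stringmatcher but relaxes the
# 'literal' kind from exact equality to substring containment; this is the
# "contains string" behaviour that the user()/author() docstrings in this
# module describe.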
def _substringmatcher(pattern):
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

def tagged(repo, subset, x):
    return tag(repo, subset, x)

def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)

# experimental
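# Editorial note: wdir() exposes the working directory as a synthetic
# revision (node.wdirrev); the guard below only emits it when that
# pseudo-revision is already present in the subset or when the subset covers
# the whole repository.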
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

# for internal use
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            r = repo[t].rev()
        if r in seen:
            continue
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)

# for internal use
def _intlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
def _hexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])

methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}

def optimize(x, small):
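    # Editorial note: optimize() returns a (weight, tree) pair. The weight is
    # a rough cost estimate used when rewriting 'and' nodes so that the
    # presumably cheaper operand is evaluated first; `small` hints that the
    # expression is expected to produce a small result.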
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
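        # Editorial note: when one operand is ancestors(X) and the other is
        # "not ancestors(Y)", the whole 'and' node is rewritten into
        # only(X, Y) below, which has a dedicated, cheaper implementation.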
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
2376 "outgoing user"):
2377 "outgoing user"):
2377 w = 10 # slow
2378 w = 10 # slow
2378 elif f in "modifies adds removes":
2379 elif f in "modifies adds removes":
2379 w = 30 # slower
2380 w = 30 # slower
2380 elif f == "contains":
2381 elif f == "contains":
2381 w = 100 # very slow
2382 w = 100 # very slow
2382 elif f == "ancestor":
2383 elif f == "ancestor":
2383 w = 1 * smallbonus
2384 w = 1 * smallbonus
2384 elif f in "reverse limit first _intlist":
2385 elif f in "reverse limit first _intlist":
2385 w = 0
2386 w = 0
2386 elif f in "sort":
2387 elif f in "sort":
2387 w = 10 # assume most sorts look at changelog
2388 w = 10 # assume most sorts look at changelog
2388 else:
2389 else:
2389 w = 1
2390 w = 1
2390 return w + wa, (op, x[1], ta)
2391 return w + wa, (op, x[1], ta)
2391 return 1, x
2392 return 1, x
2392
2393
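# A minimal standalone sketch (illustrative only, not the Mercurial
# implementation) of the 'or' fast path used by optimize() above: runs of
# adjacent trivial operands ('string'/'symbol' nodes) are folded into a single
# _list() call, so machine-generated expressions such as 'a + b + c()' do not
# build a long chain of additions. fold_trivial_or is a hypothetical helper.
def fold_trivial_or(operands):
    folded, run = [], []
    def flush():
        if not run:
            return
        if len(run) == 1:
            folded.append(run[0])
        else:
            folded.append(('func', ('symbol', '_list'),
                           ('string', '\0'.join(t[1] for t in run))))
        del run[:]
    for t in operands:
        if t[0] in ('string', 'symbol'):
            run.append(t)
            continue
        flush()
        folded.append(t)
    flush()
    return ('or',) + tuple(folded)

# 'a + b + c()' becomes "_list('a\x00b') or c()"
print(fold_trivial_or([('symbol', 'a'), ('symbol', 'b'),
                       ('func', ('symbol', 'c'), None)]))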
2393 _aliasarg = ('func', ('symbol', '_aliasarg'))
2394 _aliasarg = ('func', ('symbol', '_aliasarg'))
2394 def _getaliasarg(tree):
2395 def _getaliasarg(tree):
2395 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2396 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2396 return X, None otherwise.
2397 return X, None otherwise.
2397 """
2398 """
2398 if (len(tree) == 3 and tree[:2] == _aliasarg
2399 if (len(tree) == 3 and tree[:2] == _aliasarg
2399 and tree[2][0] == 'string'):
2400 and tree[2][0] == 'string'):
2400 return tree[2][1]
2401 return tree[2][1]
2401 return None
2402 return None
2402
2403
2403 def _checkaliasarg(tree, known=None):
2404 def _checkaliasarg(tree, known=None):
2404 """Check tree contains no _aliasarg construct or only ones which
2405 """Check tree contains no _aliasarg construct or only ones which
2405 value is in known. Used to avoid alias placeholders injection.
2406 value is in known. Used to avoid alias placeholders injection.
2406 """
2407 """
2407 if isinstance(tree, tuple):
2408 if isinstance(tree, tuple):
2408 arg = _getaliasarg(tree)
2409 arg = _getaliasarg(tree)
2409 if arg is not None and (not known or arg not in known):
2410 if arg is not None and (not known or arg not in known):
2410 raise error.UnknownIdentifier('_aliasarg', [])
2411 raise error.UnknownIdentifier('_aliasarg', [])
2411 for t in tree:
2412 for t in tree:
2412 _checkaliasarg(t, known)
2413 _checkaliasarg(t, known)
2413
2414
2414 # the set of valid characters for the initial letter of symbols in
2415 # the set of valid characters for the initial letter of symbols in
2415 # alias declarations and definitions
2416 # alias declarations and definitions
2416 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2417 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2417 if c.isalnum() or c in '._@$' or ord(c) > 127)
2418 if c.isalnum() or c in '._@$' or ord(c) > 127)
2418
2419
2419 def _tokenizealias(program, lookup=None):
2420 def _tokenizealias(program, lookup=None):
2420 """Parse alias declaration/definition into a stream of tokens
2421 """Parse alias declaration/definition into a stream of tokens
2421
2422
2422 This allows symbol names to use also ``$`` as an initial letter
2423 This allows symbol names to use also ``$`` as an initial letter
2423 (for backward compatibility), and callers of this function should
2424 (for backward compatibility), and callers of this function should
2424 examine whether ``$`` is used also for unexpected symbols or not.
2425 examine whether ``$`` is used also for unexpected symbols or not.
2425 """
2426 """
2426 return tokenize(program, lookup=lookup,
2427 return tokenize(program, lookup=lookup,
2427 syminitletters=_aliassyminitletters)
2428 syminitletters=_aliassyminitletters)
2428
2429
2429 def _parsealiasdecl(decl):
2430 def _parsealiasdecl(decl):
2430 """Parse alias declaration ``decl``
2431 """Parse alias declaration ``decl``
2431
2432
2432 This returns ``(name, tree, args, errorstr)`` tuple:
2433 This returns ``(name, tree, args, errorstr)`` tuple:
2433
2434
2434 - ``name``: of declared alias (may be ``decl`` itself at error)
2435 - ``name``: of declared alias (may be ``decl`` itself at error)
2435 - ``tree``: parse result (or ``None`` at error)
2436 - ``tree``: parse result (or ``None`` at error)
2436 - ``args``: list of alias argument names (or None for symbol declaration)
2437 - ``args``: list of alias argument names (or None for symbol declaration)
2437 - ``errorstr``: detail about detected error (or None)
2438 - ``errorstr``: detail about detected error (or None)
2438
2439
2439 >>> _parsealiasdecl('foo')
2440 >>> _parsealiasdecl('foo')
2440 ('foo', ('symbol', 'foo'), None, None)
2441 ('foo', ('symbol', 'foo'), None, None)
2441 >>> _parsealiasdecl('$foo')
2442 >>> _parsealiasdecl('$foo')
2442 ('$foo', None, None, "'$' not for alias arguments")
2443 ('$foo', None, None, "'$' not for alias arguments")
2443 >>> _parsealiasdecl('foo::bar')
2444 >>> _parsealiasdecl('foo::bar')
2444 ('foo::bar', None, None, 'invalid format')
2445 ('foo::bar', None, None, 'invalid format')
2445 >>> _parsealiasdecl('foo bar')
2446 >>> _parsealiasdecl('foo bar')
2446 ('foo bar', None, None, 'at 4: invalid token')
2447 ('foo bar', None, None, 'at 4: invalid token')
2447 >>> _parsealiasdecl('foo()')
2448 >>> _parsealiasdecl('foo()')
2448 ('foo', ('func', ('symbol', 'foo')), [], None)
2449 ('foo', ('func', ('symbol', 'foo')), [], None)
2449 >>> _parsealiasdecl('$foo()')
2450 >>> _parsealiasdecl('$foo()')
2450 ('$foo()', None, None, "'$' not for alias arguments")
2451 ('$foo()', None, None, "'$' not for alias arguments")
2451 >>> _parsealiasdecl('foo($1, $2)')
2452 >>> _parsealiasdecl('foo($1, $2)')
2452 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2453 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2453 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2454 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2454 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2455 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2455 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2456 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2456 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2457 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2457 >>> _parsealiasdecl('foo(bar($1, $2))')
2458 >>> _parsealiasdecl('foo(bar($1, $2))')
2458 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2459 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2459 >>> _parsealiasdecl('foo("string")')
2460 >>> _parsealiasdecl('foo("string")')
2460 ('foo("string")', None, None, 'invalid argument list')
2461 ('foo("string")', None, None, 'invalid argument list')
2461 >>> _parsealiasdecl('foo($1, $2')
2462 >>> _parsealiasdecl('foo($1, $2')
2462 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2463 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2463 >>> _parsealiasdecl('foo("string')
2464 >>> _parsealiasdecl('foo("string')
2464 ('foo("string', None, None, 'at 5: unterminated string')
2465 ('foo("string', None, None, 'at 5: unterminated string')
2465 >>> _parsealiasdecl('foo($1, $2, $1)')
2466 >>> _parsealiasdecl('foo($1, $2, $1)')
2466 ('foo', None, None, 'argument names collide with each other')
2467 ('foo', None, None, 'argument names collide with each other')
2467 """
2468 """
2468 p = parser.parser(elements)
2469 p = parser.parser(elements)
2469 try:
2470 try:
2470 tree, pos = p.parse(_tokenizealias(decl))
2471 tree, pos = p.parse(_tokenizealias(decl))
2471 if (pos != len(decl)):
2472 if (pos != len(decl)):
2472 raise error.ParseError(_('invalid token'), pos)
2473 raise error.ParseError(_('invalid token'), pos)
2473
2474
2474 if isvalidsymbol(tree):
2475 if isvalidsymbol(tree):
2475 # "name = ...." style
2476 # "name = ...." style
2476 name = getsymbol(tree)
2477 name = getsymbol(tree)
2477 if name.startswith('$'):
2478 if name.startswith('$'):
2478 return (decl, None, None, _("'$' not for alias arguments"))
2479 return (decl, None, None, _("'$' not for alias arguments"))
2479 return (name, ('symbol', name), None, None)
2480 return (name, ('symbol', name), None, None)
2480
2481
2481 if isvalidfunc(tree):
2482 if isvalidfunc(tree):
2482 # "name(arg, ....) = ...." style
2483 # "name(arg, ....) = ...." style
2483 name = getfuncname(tree)
2484 name = getfuncname(tree)
2484 if name.startswith('$'):
2485 if name.startswith('$'):
2485 return (decl, None, None, _("'$' not for alias arguments"))
2486 return (decl, None, None, _("'$' not for alias arguments"))
2486 args = []
2487 args = []
2487 for arg in getfuncargs(tree):
2488 for arg in getfuncargs(tree):
2488 if not isvalidsymbol(arg):
2489 if not isvalidsymbol(arg):
2489 return (decl, None, None, _("invalid argument list"))
2490 return (decl, None, None, _("invalid argument list"))
2490 args.append(getsymbol(arg))
2491 args.append(getsymbol(arg))
2491 if len(args) != len(set(args)):
2492 if len(args) != len(set(args)):
2492 return (name, None, None,
2493 return (name, None, None,
2493 _("argument names collide with each other"))
2494 _("argument names collide with each other"))
2494 return (name, ('func', ('symbol', name)), args, None)
2495 return (name, ('func', ('symbol', name)), args, None)
2495
2496
2496 return (decl, None, None, _("invalid format"))
2497 return (decl, None, None, _("invalid format"))
2497 except error.ParseError as inst:
2498 except error.ParseError as inst:
2498 return (decl, None, None, parseerrordetail(inst))
2499 return (decl, None, None, parseerrordetail(inst))
2499
2500
2500 def _parsealiasdefn(defn, args):
2501 def _parsealiasdefn(defn, args):
2501 """Parse alias definition ``defn``
2502 """Parse alias definition ``defn``
2502
2503
2503 This function also replaces alias argument references in the
2504 This function also replaces alias argument references in the
2504 specified definition by ``_aliasarg(ARGNAME)``.
2505 specified definition by ``_aliasarg(ARGNAME)``.
2505
2506
2506 ``args`` is a list of alias argument names, or None if the alias
2507 ``args`` is a list of alias argument names, or None if the alias
2507 is declared as a symbol.
2508 is declared as a symbol.
2508
2509
2509 This returns "tree" as parsing result.
2510 This returns "tree" as parsing result.
2510
2511
2511 >>> args = ['$1', '$2', 'foo']
2512 >>> args = ['$1', '$2', 'foo']
2512 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2513 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2513 (or
2514 (or
2514 (func
2515 (func
2515 ('symbol', '_aliasarg')
2516 ('symbol', '_aliasarg')
2516 ('string', '$1'))
2517 ('string', '$1'))
2517 (func
2518 (func
2518 ('symbol', '_aliasarg')
2519 ('symbol', '_aliasarg')
2519 ('string', 'foo')))
2520 ('string', 'foo')))
2520 >>> try:
2521 >>> try:
2521 ... _parsealiasdefn('$1 or $bar', args)
2522 ... _parsealiasdefn('$1 or $bar', args)
2522 ... except error.ParseError, inst:
2523 ... except error.ParseError, inst:
2523 ... print parseerrordetail(inst)
2524 ... print parseerrordetail(inst)
2524 at 6: '$' not for alias arguments
2525 at 6: '$' not for alias arguments
2525 >>> args = ['$1', '$10', 'foo']
2526 >>> args = ['$1', '$10', 'foo']
2526 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2527 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2527 (or
2528 (or
2528 (func
2529 (func
2529 ('symbol', '_aliasarg')
2530 ('symbol', '_aliasarg')
2530 ('string', '$10'))
2531 ('string', '$10'))
2531 ('symbol', 'foobar'))
2532 ('symbol', 'foobar'))
2532 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2533 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2533 (or
2534 (or
2534 ('string', '$1')
2535 ('string', '$1')
2535 ('string', 'foo'))
2536 ('string', 'foo'))
2536 """
2537 """
2537 def tokenizedefn(program, lookup=None):
2538 def tokenizedefn(program, lookup=None):
2538 if args:
2539 if args:
2539 argset = set(args)
2540 argset = set(args)
2540 else:
2541 else:
2541 argset = set()
2542 argset = set()
2542
2543
2543 for t, value, pos in _tokenizealias(program, lookup=lookup):
2544 for t, value, pos in _tokenizealias(program, lookup=lookup):
2544 if t == 'symbol':
2545 if t == 'symbol':
2545 if value in argset:
2546 if value in argset:
2546 # emulate tokenization of "_aliasarg('ARGNAME')":
2547 # emulate tokenization of "_aliasarg('ARGNAME')":
2547 # "_aliasarg()" is an unknown symbol only used separate
2548 # "_aliasarg()" is an unknown symbol only used separate
2548 # alias argument placeholders from regular strings.
2549 # alias argument placeholders from regular strings.
2549 yield ('symbol', '_aliasarg', pos)
2550 yield ('symbol', '_aliasarg', pos)
2550 yield ('(', None, pos)
2551 yield ('(', None, pos)
2551 yield ('string', value, pos)
2552 yield ('string', value, pos)
2552 yield (')', None, pos)
2553 yield (')', None, pos)
2553 continue
2554 continue
2554 elif value.startswith('$'):
2555 elif value.startswith('$'):
2555 raise error.ParseError(_("'$' not for alias arguments"),
2556 raise error.ParseError(_("'$' not for alias arguments"),
2556 pos)
2557 pos)
2557 yield (t, value, pos)
2558 yield (t, value, pos)
2558
2559
2559 p = parser.parser(elements)
2560 p = parser.parser(elements)
2560 tree, pos = p.parse(tokenizedefn(defn))
2561 tree, pos = p.parse(tokenizedefn(defn))
2561 if pos != len(defn):
2562 if pos != len(defn):
2562 raise error.ParseError(_('invalid token'), pos)
2563 raise error.ParseError(_('invalid token'), pos)
2563 return parser.simplifyinfixops(tree, ('or',))
2564 return parser.simplifyinfixops(tree, ('or',))
2564
2565
2565 class revsetalias(object):
2566 class revsetalias(object):
2566 # whether own `error` information is already shown or not.
2567 # whether own `error` information is already shown or not.
2567 # this avoids showing same warning multiple times at each `findaliases`.
2568 # this avoids showing same warning multiple times at each `findaliases`.
2568 warned = False
2569 warned = False
2569
2570
2570 def __init__(self, name, value):
2571 def __init__(self, name, value):
2571 '''Aliases like:
2572 '''Aliases like:
2572
2573
2573 h = heads(default)
2574 h = heads(default)
2574 b($1) = ancestors($1) - ancestors(default)
2575 b($1) = ancestors($1) - ancestors(default)
2575 '''
2576 '''
2576 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2577 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2577 if self.error:
2578 if self.error:
2578 self.error = _('failed to parse the declaration of revset alias'
2579 self.error = _('failed to parse the declaration of revset alias'
2579 ' "%s": %s') % (self.name, self.error)
2580 ' "%s": %s') % (self.name, self.error)
2580 return
2581 return
2581
2582
2582 try:
2583 try:
2583 self.replacement = _parsealiasdefn(value, self.args)
2584 self.replacement = _parsealiasdefn(value, self.args)
2584 # Check for placeholder injection
2585 # Check for placeholder injection
2585 _checkaliasarg(self.replacement, self.args)
2586 _checkaliasarg(self.replacement, self.args)
2586 except error.ParseError as inst:
2587 except error.ParseError as inst:
2587 self.error = _('failed to parse the definition of revset alias'
2588 self.error = _('failed to parse the definition of revset alias'
2588 ' "%s": %s') % (self.name, parseerrordetail(inst))
2589 ' "%s": %s') % (self.name, parseerrordetail(inst))
2589
2590
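# Example (hedged): aliases of both shapes shown in the docstring above
# normally live in the [revsetalias] config section, which findaliases()
# below reads via ui.configitems('revsetalias'):
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# 'h' then behaves as a plain symbol, while 'b(foo)' expands with $1 -> foo.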
2590 def _getalias(aliases, tree):
2591 def _getalias(aliases, tree):
2591 """If tree looks like an unexpanded alias, return it. Return None
2592 """If tree looks like an unexpanded alias, return it. Return None
2592 otherwise.
2593 otherwise.
2593 """
2594 """
2594 if isinstance(tree, tuple) and tree:
2595 if isinstance(tree, tuple) and tree:
2595 if tree[0] == 'symbol' and len(tree) == 2:
2596 if tree[0] == 'symbol' and len(tree) == 2:
2596 name = tree[1]
2597 name = tree[1]
2597 alias = aliases.get(name)
2598 alias = aliases.get(name)
2598 if alias and alias.args is None and alias.tree == tree:
2599 if alias and alias.args is None and alias.tree == tree:
2599 return alias
2600 return alias
2600 if tree[0] == 'func' and len(tree) > 1:
2601 if tree[0] == 'func' and len(tree) > 1:
2601 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2602 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2602 name = tree[1][1]
2603 name = tree[1][1]
2603 alias = aliases.get(name)
2604 alias = aliases.get(name)
2604 if alias and alias.args is not None and alias.tree == tree[:2]:
2605 if alias and alias.args is not None and alias.tree == tree[:2]:
2605 return alias
2606 return alias
2606 return None
2607 return None
2607
2608
2608 def _expandargs(tree, args):
2609 def _expandargs(tree, args):
2609 """Replace _aliasarg instances with the substitution value of the
2610 """Replace _aliasarg instances with the substitution value of the
2610 same name in args, recursively.
2611 same name in args, recursively.
2611 """
2612 """
2612 if not tree or not isinstance(tree, tuple):
2613 if not tree or not isinstance(tree, tuple):
2613 return tree
2614 return tree
2614 arg = _getaliasarg(tree)
2615 arg = _getaliasarg(tree)
2615 if arg is not None:
2616 if arg is not None:
2616 return args[arg]
2617 return args[arg]
2617 return tuple(_expandargs(t, args) for t in tree)
2618 return tuple(_expandargs(t, args) for t in tree)
2618
2619
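# A self-contained sketch (illustrative only) of the substitution step that
# _expandargs performs: every ('func', ('symbol', '_aliasarg'), ('string', NAME))
# node is replaced by the caller-supplied tree for NAME, recursing through
# nested tuples. expand_placeholders is a hypothetical stand-in.
def expand_placeholders(tree, args):
    if not isinstance(tree, tuple):
        return tree
    if (len(tree) == 3 and tree[:2] == ('func', ('symbol', '_aliasarg'))
            and tree[2][0] == 'string'):
        return args[tree[2][1]]
    return tuple(expand_placeholders(t, args) for t in tree)

replacement = ('func', ('symbol', 'ancestors'),
               ('func', ('symbol', '_aliasarg'), ('string', '$1')))
print(expand_placeholders(replacement, {'$1': ('symbol', 'tip')}))
# -> ('func', ('symbol', 'ancestors'), ('symbol', 'tip'))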
2619 def _expandaliases(aliases, tree, expanding, cache):
2620 def _expandaliases(aliases, tree, expanding, cache):
2620 """Expand aliases in tree, recursively.
2621 """Expand aliases in tree, recursively.
2621
2622
2622 'aliases' is a dictionary mapping user defined aliases to
2623 'aliases' is a dictionary mapping user defined aliases to
2623 revsetalias objects.
2624 revsetalias objects.
2624 """
2625 """
2625 if not isinstance(tree, tuple):
2626 if not isinstance(tree, tuple):
2626 # Do not expand raw strings
2627 # Do not expand raw strings
2627 return tree
2628 return tree
2628 alias = _getalias(aliases, tree)
2629 alias = _getalias(aliases, tree)
2629 if alias is not None:
2630 if alias is not None:
2630 if alias.error:
2631 if alias.error:
2631 raise util.Abort(alias.error)
2632 raise util.Abort(alias.error)
2632 if alias in expanding:
2633 if alias in expanding:
2633 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2634 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2634 'detected') % alias.name)
2635 'detected') % alias.name)
2635 expanding.append(alias)
2636 expanding.append(alias)
2636 if alias.name not in cache:
2637 if alias.name not in cache:
2637 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2638 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2638 expanding, cache)
2639 expanding, cache)
2639 result = cache[alias.name]
2640 result = cache[alias.name]
2640 expanding.pop()
2641 expanding.pop()
2641 if alias.args is not None:
2642 if alias.args is not None:
2642 l = getlist(tree[2])
2643 l = getlist(tree[2])
2643 if len(l) != len(alias.args):
2644 if len(l) != len(alias.args):
2644 raise error.ParseError(
2645 raise error.ParseError(
2645 _('invalid number of arguments: %s') % len(l))
2646 _('invalid number of arguments: %s') % len(l))
2646 l = [_expandaliases(aliases, a, [], cache) for a in l]
2647 l = [_expandaliases(aliases, a, [], cache) for a in l]
2647 result = _expandargs(result, dict(zip(alias.args, l)))
2648 result = _expandargs(result, dict(zip(alias.args, l)))
2648 else:
2649 else:
2649 result = tuple(_expandaliases(aliases, t, expanding, cache)
2650 result = tuple(_expandaliases(aliases, t, expanding, cache)
2650 for t in tree)
2651 for t in tree)
2651 return result
2652 return result
2652
2653
2653 def findaliases(ui, tree, showwarning=None):
2654 def findaliases(ui, tree, showwarning=None):
2654 _checkaliasarg(tree)
2655 _checkaliasarg(tree)
2655 aliases = {}
2656 aliases = {}
2656 for k, v in ui.configitems('revsetalias'):
2657 for k, v in ui.configitems('revsetalias'):
2657 alias = revsetalias(k, v)
2658 alias = revsetalias(k, v)
2658 aliases[alias.name] = alias
2659 aliases[alias.name] = alias
2659 tree = _expandaliases(aliases, tree, [], {})
2660 tree = _expandaliases(aliases, tree, [], {})
2660 if showwarning:
2661 if showwarning:
2661 # warn about problematic (but not referenced) aliases
2662 # warn about problematic (but not referenced) aliases
2662 for name, alias in sorted(aliases.iteritems()):
2663 for name, alias in sorted(aliases.iteritems()):
2663 if alias.error and not alias.warned:
2664 if alias.error and not alias.warned:
2664 showwarning(_('warning: %s\n') % (alias.error))
2665 showwarning(_('warning: %s\n') % (alias.error))
2665 alias.warned = True
2666 alias.warned = True
2666 return tree
2667 return tree
2667
2668
2668 def foldconcat(tree):
2669 def foldconcat(tree):
2669 """Fold elements to be concatenated by `##`
2670 """Fold elements to be concatenated by `##`
2670 """
2671 """
2671 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2672 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2672 return tree
2673 return tree
2673 if tree[0] == '_concat':
2674 if tree[0] == '_concat':
2674 pending = [tree]
2675 pending = [tree]
2675 l = []
2676 l = []
2676 while pending:
2677 while pending:
2677 e = pending.pop()
2678 e = pending.pop()
2678 if e[0] == '_concat':
2679 if e[0] == '_concat':
2679 pending.extend(reversed(e[1:]))
2680 pending.extend(reversed(e[1:]))
2680 elif e[0] in ('string', 'symbol'):
2681 elif e[0] in ('string', 'symbol'):
2681 l.append(e[1])
2682 l.append(e[1])
2682 else:
2683 else:
2683 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2684 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2684 raise error.ParseError(msg)
2685 raise error.ParseError(msg)
2685 return ('string', ''.join(l))
2686 return ('string', ''.join(l))
2686 else:
2687 else:
2687 return tuple(foldconcat(t) for t in tree)
2688 return tuple(foldconcat(t) for t in tree)
2688
2689
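# Illustration (values chosen for the example): for "release_" ## "1.0" the
# parser produces a '_concat' node, and foldconcat() above collapses it into
# a single string node:
#
#   tree = ('_concat', ('string', 'release_'), ('string', '1.0'))
#   foldconcat(tree) -> ('string', 'release_1.0')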
2689 def parse(spec, lookup=None):
2690 def parse(spec, lookup=None):
2690 p = parser.parser(elements)
2691 p = parser.parser(elements)
2691 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2692 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2692 if pos != len(spec):
2693 if pos != len(spec):
2693 raise error.ParseError(_("invalid token"), pos)
2694 raise error.ParseError(_("invalid token"), pos)
2694 return parser.simplifyinfixops(tree, ('or',))
2695 return parser.simplifyinfixops(tree, ('or',))
2695
2696
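# Hedged usage sketch: assumes a Python 2 environment where this module is
# importable as mercurial.revset. parse() turns a spec string into the tuple
# tree consumed by optimize()/getset(), and prettyformat() (defined further
# down in this module) renders it for debugging; the spec is only an example.
from mercurial import revset

tree = revset.parse('heads(default) and not merge()')
print(revset.prettyformat(tree))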
2696 def posttreebuilthook(tree, repo):
2697 def posttreebuilthook(tree, repo):
2697 # hook for extensions to execute code on the optimized tree
2698 # hook for extensions to execute code on the optimized tree
2698 pass
2699 pass
2699
2700
2700 def match(ui, spec, repo=None):
2701 def match(ui, spec, repo=None):
2701 if not spec:
2702 if not spec:
2702 raise error.ParseError(_("empty query"))
2703 raise error.ParseError(_("empty query"))
2703 lookup = None
2704 lookup = None
2704 if repo:
2705 if repo:
2705 lookup = repo.__contains__
2706 lookup = repo.__contains__
2706 tree = parse(spec, lookup)
2707 tree = parse(spec, lookup)
2707 return _makematcher(ui, tree, repo)
2708 return _makematcher(ui, tree, repo)
2708
2709
2709 def matchany(ui, specs, repo=None):
2710 def matchany(ui, specs, repo=None):
2710 """Create a matcher that will include any revisions matching one of the
2711 """Create a matcher that will include any revisions matching one of the
2711 given specs"""
2712 given specs"""
2712 if not specs:
2713 if not specs:
2713 def mfunc(repo, subset=None):
2714 def mfunc(repo, subset=None):
2714 return baseset()
2715 return baseset()
2715 return mfunc
2716 return mfunc
2716 if not all(specs):
2717 if not all(specs):
2717 raise error.ParseError(_("empty query"))
2718 raise error.ParseError(_("empty query"))
2718 lookup = None
2719 lookup = None
2719 if repo:
2720 if repo:
2720 lookup = repo.__contains__
2721 lookup = repo.__contains__
2721 if len(specs) == 1:
2722 if len(specs) == 1:
2722 tree = parse(specs[0], lookup)
2723 tree = parse(specs[0], lookup)
2723 else:
2724 else:
2724 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2725 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2725 return _makematcher(ui, tree, repo)
2726 return _makematcher(ui, tree, repo)
2726
2727
2727 def _makematcher(ui, tree, repo):
2728 def _makematcher(ui, tree, repo):
2728 if ui:
2729 if ui:
2729 tree = findaliases(ui, tree, showwarning=ui.warn)
2730 tree = findaliases(ui, tree, showwarning=ui.warn)
2730 tree = foldconcat(tree)
2731 tree = foldconcat(tree)
2731 weight, tree = optimize(tree, True)
2732 weight, tree = optimize(tree, True)
2732 posttreebuilthook(tree, repo)
2733 posttreebuilthook(tree, repo)
2733 def mfunc(repo, subset=None):
2734 def mfunc(repo, subset=None):
2734 if subset is None:
2735 if subset is None:
2735 subset = fullreposet(repo)
2736 subset = fullreposet(repo)
2736 if util.safehasattr(subset, 'isascending'):
2737 if util.safehasattr(subset, 'isascending'):
2737 result = getset(repo, subset, tree)
2738 result = getset(repo, subset, tree)
2738 else:
2739 else:
2739 result = getset(repo, baseset(subset), tree)
2740 result = getset(repo, baseset(subset), tree)
2740 return result
2741 return result
2741 return mfunc
2742 return mfunc
2742
2743
2743 def formatspec(expr, *args):
2744 def formatspec(expr, *args):
2744 '''
2745 '''
2745 This is a convenience function for using revsets internally, and
2746 This is a convenience function for using revsets internally, and
2746 escapes arguments appropriately. Aliases are intentionally ignored
2747 escapes arguments appropriately. Aliases are intentionally ignored
2747 so that intended expression behavior isn't accidentally subverted.
2748 so that intended expression behavior isn't accidentally subverted.
2748
2749
2749 Supported arguments:
2750 Supported arguments:
2750
2751
2751 %r = revset expression, parenthesized
2752 %r = revset expression, parenthesized
2752 %d = int(arg), no quoting
2753 %d = int(arg), no quoting
2753 %s = string(arg), escaped and single-quoted
2754 %s = string(arg), escaped and single-quoted
2754 %b = arg.branch(), escaped and single-quoted
2755 %b = arg.branch(), escaped and single-quoted
2755 %n = hex(arg), single-quoted
2756 %n = hex(arg), single-quoted
2756 %% = a literal '%'
2757 %% = a literal '%'
2757
2758
2758 Prefixing the type with 'l' specifies a parenthesized list of that type.
2759 Prefixing the type with 'l' specifies a parenthesized list of that type.
2759
2760
2760 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2761 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2761 '(10 or 11):: and ((this()) or (that()))'
2762 '(10 or 11):: and ((this()) or (that()))'
2762 >>> formatspec('%d:: and not %d::', 10, 20)
2763 >>> formatspec('%d:: and not %d::', 10, 20)
2763 '10:: and not 20::'
2764 '10:: and not 20::'
2764 >>> formatspec('%ld or %ld', [], [1])
2765 >>> formatspec('%ld or %ld', [], [1])
2765 "_list('') or 1"
2766 "_list('') or 1"
2766 >>> formatspec('keyword(%s)', 'foo\\xe9')
2767 >>> formatspec('keyword(%s)', 'foo\\xe9')
2767 "keyword('foo\\\\xe9')"
2768 "keyword('foo\\\\xe9')"
2768 >>> b = lambda: 'default'
2769 >>> b = lambda: 'default'
2769 >>> b.branch = b
2770 >>> b.branch = b
2770 >>> formatspec('branch(%b)', b)
2771 >>> formatspec('branch(%b)', b)
2771 "branch('default')"
2772 "branch('default')"
2772 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2773 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2773 "root(_list('a\\x00b\\x00c\\x00d'))"
2774 "root(_list('a\\x00b\\x00c\\x00d'))"
2774 '''
2775 '''
2775
2776
2776 def quote(s):
2777 def quote(s):
2777 return repr(str(s))
2778 return repr(str(s))
2778
2779
2779 def argtype(c, arg):
2780 def argtype(c, arg):
2780 if c == 'd':
2781 if c == 'd':
2781 return str(int(arg))
2782 return str(int(arg))
2782 elif c == 's':
2783 elif c == 's':
2783 return quote(arg)
2784 return quote(arg)
2784 elif c == 'r':
2785 elif c == 'r':
2785 parse(arg) # make sure syntax errors are confined
2786 parse(arg) # make sure syntax errors are confined
2786 return '(%s)' % arg
2787 return '(%s)' % arg
2787 elif c == 'n':
2788 elif c == 'n':
2788 return quote(node.hex(arg))
2789 return quote(node.hex(arg))
2789 elif c == 'b':
2790 elif c == 'b':
2790 return quote(arg.branch())
2791 return quote(arg.branch())
2791
2792
2792 def listexp(s, t):
2793 def listexp(s, t):
2793 l = len(s)
2794 l = len(s)
2794 if l == 0:
2795 if l == 0:
2795 return "_list('')"
2796 return "_list('')"
2796 elif l == 1:
2797 elif l == 1:
2797 return argtype(t, s[0])
2798 return argtype(t, s[0])
2798 elif t == 'd':
2799 elif t == 'd':
2799 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2800 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2800 elif t == 's':
2801 elif t == 's':
2801 return "_list('%s')" % "\0".join(s)
2802 return "_list('%s')" % "\0".join(s)
2802 elif t == 'n':
2803 elif t == 'n':
2803 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2804 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2804 elif t == 'b':
2805 elif t == 'b':
2805 return "_list('%s')" % "\0".join(a.branch() for a in s)
2806 return "_list('%s')" % "\0".join(a.branch() for a in s)
2806
2807
2807 m = l // 2
2808 m = l // 2
2808 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2809 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2809
2810
2810 ret = ''
2811 ret = ''
2811 pos = 0
2812 pos = 0
2812 arg = 0
2813 arg = 0
2813 while pos < len(expr):
2814 while pos < len(expr):
2814 c = expr[pos]
2815 c = expr[pos]
2815 if c == '%':
2816 if c == '%':
2816 pos += 1
2817 pos += 1
2817 d = expr[pos]
2818 d = expr[pos]
2818 if d == '%':
2819 if d == '%':
2819 ret += d
2820 ret += d
2820 elif d in 'dsnbr':
2821 elif d in 'dsnbr':
2821 ret += argtype(d, args[arg])
2822 ret += argtype(d, args[arg])
2822 arg += 1
2823 arg += 1
2823 elif d == 'l':
2824 elif d == 'l':
2824 # a list of some type
2825 # a list of some type
2825 pos += 1
2826 pos += 1
2826 d = expr[pos]
2827 d = expr[pos]
2827 ret += listexp(list(args[arg]), d)
2828 ret += listexp(list(args[arg]), d)
2828 arg += 1
2829 arg += 1
2829 else:
2830 else:
2830 raise util.Abort('unexpected revspec format character %s' % d)
2831 raise util.Abort('unexpected revspec format character %s' % d)
2831 else:
2832 else:
2832 ret += c
2833 ret += c
2833 pos += 1
2834 pos += 1
2834
2835
2835 return ret
2836 return ret
2836
2837
2837 def prettyformat(tree):
2838 def prettyformat(tree):
2838 return parser.prettyformat(tree, ('string', 'symbol'))
2839 return parser.prettyformat(tree, ('string', 'symbol'))
2839
2840
2840 def depth(tree):
2841 def depth(tree):
2841 if isinstance(tree, tuple):
2842 if isinstance(tree, tuple):
2842 return max(map(depth, tree)) + 1
2843 return max(map(depth, tree)) + 1
2843 else:
2844 else:
2844 return 0
2845 return 0
2845
2846
2846 def funcsused(tree):
2847 def funcsused(tree):
2847 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2848 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2848 return set()
2849 return set()
2849 else:
2850 else:
2850 funcs = set()
2851 funcs = set()
2851 for s in tree[1:]:
2852 for s in tree[1:]:
2852 funcs |= funcsused(s)
2853 funcs |= funcsused(s)
2853 if tree[0] == 'func':
2854 if tree[0] == 'func':
2854 funcs.add(tree[1][1])
2855 funcs.add(tree[1][1])
2855 return funcs
2856 return funcs
2856
2857
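# Illustration (hand-built tree): for the expression 'p1(tip)' the helpers
# above report a nesting depth of 2 and a single used function name.
#
#   tree = ('func', ('symbol', 'p1'), ('symbol', 'tip'))
#   depth(tree)     -> 2
#   funcsused(tree) -> set(['p1'])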
2857 class abstractsmartset(object):
2858 class abstractsmartset(object):
2858
2859
2859 def __nonzero__(self):
2860 def __nonzero__(self):
2860 """True if the smartset is not empty"""
2861 """True if the smartset is not empty"""
2861 raise NotImplementedError()
2862 raise NotImplementedError()
2862
2863
2863 def __contains__(self, rev):
2864 def __contains__(self, rev):
2864 """provide fast membership testing"""
2865 """provide fast membership testing"""
2865 raise NotImplementedError()
2866 raise NotImplementedError()
2866
2867
2867 def __iter__(self):
2868 def __iter__(self):
2868 """iterate the set in the order it is supposed to be iterated"""
2869 """iterate the set in the order it is supposed to be iterated"""
2869 raise NotImplementedError()
2870 raise NotImplementedError()
2870
2871
2871 # Attributes containing a function to perform a fast iteration in a given
2872 # Attributes containing a function to perform a fast iteration in a given
2872 # direction. A smartset can have none, one, or both defined.
2873 # direction. A smartset can have none, one, or both defined.
2873 #
2874 #
2874 # Default value is None instead of a function returning None to avoid
2875 # Default value is None instead of a function returning None to avoid
2875 # initializing an iterator just for testing if a fast method exists.
2876 # initializing an iterator just for testing if a fast method exists.
2876 fastasc = None
2877 fastasc = None
2877 fastdesc = None
2878 fastdesc = None
2878
2879
2879 def isascending(self):
2880 def isascending(self):
2880 """True if the set will iterate in ascending order"""
2881 """True if the set will iterate in ascending order"""
2881 raise NotImplementedError()
2882 raise NotImplementedError()
2882
2883
2883 def isdescending(self):
2884 def isdescending(self):
2884 """True if the set will iterate in descending order"""
2885 """True if the set will iterate in descending order"""
2885 raise NotImplementedError()
2886 raise NotImplementedError()
2886
2887
2887 def min(self):
2888 def min(self):
2888 """return the minimum element in the set"""
2889 """return the minimum element in the set"""
2889 if self.fastasc is not None:
2890 if self.fastasc is not None:
2890 for r in self.fastasc():
2891 for r in self.fastasc():
2891 return r
2892 return r
2892 raise ValueError('arg is an empty sequence')
2893 raise ValueError('arg is an empty sequence')
2893 return min(self)
2894 return min(self)
2894
2895
2895 def max(self):
2896 def max(self):
2896 """return the maximum element in the set"""
2897 """return the maximum element in the set"""
2897 if self.fastdesc is not None:
2898 if self.fastdesc is not None:
2898 for r in self.fastdesc():
2899 for r in self.fastdesc():
2899 return r
2900 return r
2900 raise ValueError('arg is an empty sequence')
2901 raise ValueError('arg is an empty sequence')
2901 return max(self)
2902 return max(self)
2902
2903
2903 def first(self):
2904 def first(self):
2904 """return the first element in the set (user iteration perspective)
2905 """return the first element in the set (user iteration perspective)
2905
2906
2906 Return None if the set is empty"""
2907 Return None if the set is empty"""
2907 raise NotImplementedError()
2908 raise NotImplementedError()
2908
2909
2909 def last(self):
2910 def last(self):
2910 """return the last element in the set (user iteration perspective)
2911 """return the last element in the set (user iteration perspective)
2911
2912
2912 Return None if the set is empty"""
2913 Return None if the set is empty"""
2913 raise NotImplementedError()
2914 raise NotImplementedError()
2914
2915
2915 def __len__(self):
2916 def __len__(self):
2916 """return the length of the smartsets
2917 """return the length of the smartsets
2917
2918
2918 This can be expensive on smartset that could be lazy otherwise."""
2919 This can be expensive on smartset that could be lazy otherwise."""
2919 raise NotImplementedError()
2920 raise NotImplementedError()
2920
2921
2921 def reverse(self):
2922 def reverse(self):
2922 """reverse the expected iteration order"""
2923 """reverse the expected iteration order"""
2923 raise NotImplementedError()
2924 raise NotImplementedError()
2924
2925
2925 def sort(self, reverse=True):
2926 def sort(self, reverse=True):
2926 """get the set to iterate in an ascending or descending order"""
2927 """get the set to iterate in an ascending or descending order"""
2927 raise NotImplementedError()
2928 raise NotImplementedError()
2928
2929
2929 def __and__(self, other):
2930 def __and__(self, other):
2930 """Returns a new object with the intersection of the two collections.
2931 """Returns a new object with the intersection of the two collections.
2931
2932
2932 This is part of the mandatory API for smartset."""
2933 This is part of the mandatory API for smartset."""
2933 if isinstance(other, fullreposet):
2934 if isinstance(other, fullreposet):
2934 return self
2935 return self
2935 return self.filter(other.__contains__, cache=False)
2936 return self.filter(other.__contains__, cache=False)
2936
2937
2937 def __add__(self, other):
2938 def __add__(self, other):
2938 """Returns a new object with the union of the two collections.
2939 """Returns a new object with the union of the two collections.
2939
2940
2940 This is part of the mandatory API for smartset."""
2941 This is part of the mandatory API for smartset."""
2941 return addset(self, other)
2942 return addset(self, other)
2942
2943
2943 def __sub__(self, other):
2944 def __sub__(self, other):
2944 """Returns a new object with the substraction of the two collections.
2945 """Returns a new object with the substraction of the two collections.
2945
2946
2946 This is part of the mandatory API for smartset."""
2947 This is part of the mandatory API for smartset."""
2947 c = other.__contains__
2948 c = other.__contains__
2948 return self.filter(lambda r: not c(r), cache=False)
2949 return self.filter(lambda r: not c(r), cache=False)
2949
2950
2950 def filter(self, condition, cache=True):
2951 def filter(self, condition, cache=True):
2951 """Returns this smartset filtered by condition as a new smartset.
2952 """Returns this smartset filtered by condition as a new smartset.
2952
2953
2953 `condition` is a callable which takes a revision number and returns a
2954 `condition` is a callable which takes a revision number and returns a
2954 boolean.
2955 boolean.
2955
2956
2956 This is part of the mandatory API for smartset."""
2957 This is part of the mandatory API for smartset."""
2957 # builtin functions cannot be cached, but they do not need to be
2958 # builtin functions cannot be cached, but they do not need to be
2958 if cache and util.safehasattr(condition, 'func_code'):
2959 if cache and util.safehasattr(condition, 'func_code'):
2959 condition = util.cachefunc(condition)
2960 condition = util.cachefunc(condition)
2960 return filteredset(self, condition)
2961 return filteredset(self, condition)
2961
2962
2962 class baseset(abstractsmartset):
2963 class baseset(abstractsmartset):
2963 """Basic data structure that represents a revset and contains the basic
2964 """Basic data structure that represents a revset and contains the basic
2964 operation that it should be able to perform.
2965 operation that it should be able to perform.
2965
2966
2966 Every method in this class should be implemented by any smartset class.
2967 Every method in this class should be implemented by any smartset class.
2967 """
2968 """
2968 def __init__(self, data=()):
2969 def __init__(self, data=()):
2969 if not isinstance(data, list):
2970 if not isinstance(data, list):
2970 if isinstance(data, set):
2971 if isinstance(data, set):
2971 self._set = data
2972 self._set = data
2972 data = list(data)
2973 data = list(data)
2973 self._list = data
2974 self._list = data
2974 self._ascending = None
2975 self._ascending = None
2975
2976
2976 @util.propertycache
2977 @util.propertycache
2977 def _set(self):
2978 def _set(self):
2978 return set(self._list)
2979 return set(self._list)
2979
2980
2980 @util.propertycache
2981 @util.propertycache
2981 def _asclist(self):
2982 def _asclist(self):
2982 asclist = self._list[:]
2983 asclist = self._list[:]
2983 asclist.sort()
2984 asclist.sort()
2984 return asclist
2985 return asclist
2985
2986
2986 def __iter__(self):
2987 def __iter__(self):
2987 if self._ascending is None:
2988 if self._ascending is None:
2988 return iter(self._list)
2989 return iter(self._list)
2989 elif self._ascending:
2990 elif self._ascending:
2990 return iter(self._asclist)
2991 return iter(self._asclist)
2991 else:
2992 else:
2992 return reversed(self._asclist)
2993 return reversed(self._asclist)
2993
2994
2994 def fastasc(self):
2995 def fastasc(self):
2995 return iter(self._asclist)
2996 return iter(self._asclist)
2996
2997
2997 def fastdesc(self):
2998 def fastdesc(self):
2998 return reversed(self._asclist)
2999 return reversed(self._asclist)
2999
3000
3000 @util.propertycache
3001 @util.propertycache
3001 def __contains__(self):
3002 def __contains__(self):
3002 return self._set.__contains__
3003 return self._set.__contains__
3003
3004
3004 def __nonzero__(self):
3005 def __nonzero__(self):
3005 return bool(self._list)
3006 return bool(self._list)
3006
3007
3007 def sort(self, reverse=False):
3008 def sort(self, reverse=False):
3008 self._ascending = not bool(reverse)
3009 self._ascending = not bool(reverse)
3009
3010
3010 def reverse(self):
3011 def reverse(self):
3011 if self._ascending is None:
3012 if self._ascending is None:
3012 self._list.reverse()
3013 self._list.reverse()
3013 else:
3014 else:
3014 self._ascending = not self._ascending
3015 self._ascending = not self._ascending
3015
3016
3016 def __len__(self):
3017 def __len__(self):
3017 return len(self._list)
3018 return len(self._list)
3018
3019
3019 def isascending(self):
3020 def isascending(self):
3020 """Returns True if the collection is ascending order, False if not.
3021 """Returns True if the collection is ascending order, False if not.
3021
3022
3022 This is part of the mandatory API for smartset."""
3023 This is part of the mandatory API for smartset."""
3023 if len(self) <= 1:
3024 if len(self) <= 1:
3024 return True
3025 return True
3025 return self._ascending is not None and self._ascending
3026 return self._ascending is not None and self._ascending
3026
3027
3027 def isdescending(self):
3028 def isdescending(self):
3028 """Returns True if the collection is descending order, False if not.
3029 """Returns True if the collection is descending order, False if not.
3029
3030
3030 This is part of the mandatory API for smartset."""
3031 This is part of the mandatory API for smartset."""
3031 if len(self) <= 1:
3032 if len(self) <= 1:
3032 return True
3033 return True
3033 return self._ascending is not None and not self._ascending
3034 return self._ascending is not None and not self._ascending
3034
3035
3035 def first(self):
3036 def first(self):
3036 if self:
3037 if self:
3037 if self._ascending is None:
3038 if self._ascending is None:
3038 return self._list[0]
3039 return self._list[0]
3039 elif self._ascending:
3040 elif self._ascending:
3040 return self._asclist[0]
3041 return self._asclist[0]
3041 else:
3042 else:
3042 return self._asclist[-1]
3043 return self._asclist[-1]
3043 return None
3044 return None
3044
3045
3045 def last(self):
3046 def last(self):
3046 if self:
3047 if self:
3047 if self._ascending is None:
3048 if self._ascending is None:
3048 return self._list[-1]
3049 return self._list[-1]
3049 elif self._ascending:
3050 elif self._ascending:
3050 return self._asclist[-1]
3051 return self._asclist[-1]
3051 else:
3052 else:
3052 return self._asclist[0]
3053 return self._asclist[0]
3053 return None
3054 return None
3054
3055
3055 def __repr__(self):
3056 def __repr__(self):
3056 d = {None: '', False: '-', True: '+'}[self._ascending]
3057 d = {None: '', False: '-', True: '+'}[self._ascending]
3057 return '<%s%s %r>' % (type(self).__name__, d, self._list)
3058 return '<%s%s %r>' % (type(self).__name__, d, self._list)
3058
3059
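# Hedged usage sketch (assumes mercurial.revset is importable, Python 2): a
# freshly built baseset iterates in construction order until sort() or
# reverse() pins a direction.
from mercurial.revset import baseset

s = baseset([0, 3, 2])
assert list(s) == [0, 3, 2]      # no order pinned yet
s.sort()                         # ascending from now on
assert list(s) == [0, 2, 3]
s.reverse()                      # flips the pinned order to descending
assert list(s) == [3, 2, 0]
assert s.isdescending()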
3059 class filteredset(abstractsmartset):
3060 class filteredset(abstractsmartset):
3060 """Duck type for baseset class which iterates lazily over the revisions in
3061 """Duck type for baseset class which iterates lazily over the revisions in
3061 the subset and contains a function which tests for membership in the
3062 the subset and contains a function which tests for membership in the
3062 revset
3063 revset
3063 """
3064 """
3064 def __init__(self, subset, condition=lambda x: True):
3065 def __init__(self, subset, condition=lambda x: True):
3065 """
3066 """
3066 condition: a function that decides whether a revision in the subset
3067 condition: a function that decides whether a revision in the subset
3067 belongs to the revset or not.
3068 belongs to the revset or not.
3068 """
3069 """
3069 self._subset = subset
3070 self._subset = subset
3070 self._condition = condition
3071 self._condition = condition
3071 self._cache = {}
3072 self._cache = {}
3072
3073
3073 def __contains__(self, x):
3074 def __contains__(self, x):
3074 c = self._cache
3075 c = self._cache
3075 if x not in c:
3076 if x not in c:
3076 v = c[x] = x in self._subset and self._condition(x)
3077 v = c[x] = x in self._subset and self._condition(x)
3077 return v
3078 return v
3078 return c[x]
3079 return c[x]
3079
3080
3080 def __iter__(self):
3081 def __iter__(self):
3081 return self._iterfilter(self._subset)
3082 return self._iterfilter(self._subset)
3082
3083
3083 def _iterfilter(self, it):
3084 def _iterfilter(self, it):
3084 cond = self._condition
3085 cond = self._condition
3085 for x in it:
3086 for x in it:
3086 if cond(x):
3087 if cond(x):
3087 yield x
3088 yield x
3088
3089
3089 @property
3090 @property
3090 def fastasc(self):
3091 def fastasc(self):
3091 it = self._subset.fastasc
3092 it = self._subset.fastasc
3092 if it is None:
3093 if it is None:
3093 return None
3094 return None
3094 return lambda: self._iterfilter(it())
3095 return lambda: self._iterfilter(it())
3095
3096
3096 @property
3097 @property
3097 def fastdesc(self):
3098 def fastdesc(self):
3098 it = self._subset.fastdesc
3099 it = self._subset.fastdesc
3099 if it is None:
3100 if it is None:
3100 return None
3101 return None
3101 return lambda: self._iterfilter(it())
3102 return lambda: self._iterfilter(it())
3102
3103
3103 def __nonzero__(self):
3104 def __nonzero__(self):
3104 for r in self:
3105 for r in self:
3105 return True
3106 return True
3106 return False
3107 return False
3107
3108
3108 def __len__(self):
3109 def __len__(self):
3109 # Basic implementation to be changed in future patches.
3110 # Basic implementation to be changed in future patches.
3110 l = baseset([r for r in self])
3111 l = baseset([r for r in self])
3111 return len(l)
3112 return len(l)
3112
3113
3113 def sort(self, reverse=False):
3114 def sort(self, reverse=False):
3114 self._subset.sort(reverse=reverse)
3115 self._subset.sort(reverse=reverse)
3115
3116
3116 def reverse(self):
3117 def reverse(self):
3117 self._subset.reverse()
3118 self._subset.reverse()
3118
3119
3119 def isascending(self):
3120 def isascending(self):
3120 return self._subset.isascending()
3121 return self._subset.isascending()
3121
3122
3122 def isdescending(self):
3123 def isdescending(self):
3123 return self._subset.isdescending()
3124 return self._subset.isdescending()
3124
3125
3125 def first(self):
3126 def first(self):
3126 for x in self:
3127 for x in self:
3127 return x
3128 return x
3128 return None
3129 return None
3129
3130
3130 def last(self):
3131 def last(self):
3131 it = None
3132 it = None
3132 if self.isascending():
3133 if self.isascending():
3133 it = self.fastdesc
3134 it = self.fastdesc
3134 elif self.isdescending():
3135 elif self.isdescending():
3135 it = self.fastasc
3136 it = self.fastasc
3136 if it is not None:
3137 if it is not None:
3137 for x in it():
3138 for x in it():
3138 return x
3139 return x
3139 return None #empty case
3140 return None #empty case
3140 else:
3141 else:
3141 x = None
3142 x = None
3142 for x in self:
3143 for x in self:
3143 pass
3144 pass
3144 return x
3145 return x
3145
3146
3146 def __repr__(self):
3147 def __repr__(self):
3147 return '<%s %r>' % (type(self).__name__, self._subset)
3148 return '<%s %r>' % (type(self).__name__, self._subset)
3148
3149
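# Hedged sketch of filteredset (same import assumption as the baseset example
# above): membership and iteration are computed lazily from the underlying
# subset and the condition.
from mercurial.revset import baseset, filteredset

even = filteredset(baseset([4, 1, 2, 3, 0]), lambda r: r % 2 == 0)
assert list(even) == [4, 2, 0]   # subset order, filtered lazily
assert 3 not in even
assert even.first() == 4
assert len(even) == 3            # currently materializes a baseset to count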
3149 def _iterordered(ascending, iter1, iter2):
3150 def _iterordered(ascending, iter1, iter2):
3150 """produce an ordered iteration from two iterators with the same order
3151 """produce an ordered iteration from two iterators with the same order
3151
3152
3152 The ascending parameter is used to indicate the iteration direction.
3153 The ascending parameter is used to indicate the iteration direction.
3153 """
3154 """
3154 choice = max
3155 choice = max
3155 if ascending:
3156 if ascending:
3156 choice = min
3157 choice = min
3157
3158
3158 val1 = None
3159 val1 = None
3159 val2 = None
3160 val2 = None
3160 try:
3161 try:
3161 # Consume both iterators in an ordered way until one is empty
3162 # Consume both iterators in an ordered way until one is empty
3162 while True:
3163 while True:
3163 if val1 is None:
3164 if val1 is None:
3164 val1 = iter1.next()
3165 val1 = iter1.next()
3165 if val2 is None:
3166 if val2 is None:
3166 val2 = iter2.next()
3167 val2 = iter2.next()
3167 next = choice(val1, val2)
3168 next = choice(val1, val2)
3168 yield next
3169 yield next
3169 if val1 == next:
3170 if val1 == next:
3170 val1 = None
3171 val1 = None
3171 if val2 == next:
3172 if val2 == next:
3172 val2 = None
3173 val2 = None
3173 except StopIteration:
3174 except StopIteration:
3174 # Flush any remaining values and consume the other one
3175 # Flush any remaining values and consume the other one
3175 it = iter2
3176 it = iter2
3176 if val1 is not None:
3177 if val1 is not None:
3177 yield val1
3178 yield val1
3178 it = iter1
3179 it = iter1
3179 elif val2 is not None:
3180 elif val2 is not None:
3180 # might have been equality and both are empty
3181 # might have been equality and both are empty
3181 yield val2
3182 yield val2
3182 for val in it:
3183 for val in it:
3183 yield val
3184 yield val
3184
3185
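# _iterordered is essentially a duplicate-free merge of two already-sorted
# iterators. A self-contained sketch of the same idea (not the Mercurial
# code), restricted to two ascending iterators of non-None values:
def merge_ascending(it1, it2):
    # Yield the union of two ascending iterators in ascending order,
    # emitting shared values only once.
    v1 = next(it1, None)
    v2 = next(it2, None)
    while v1 is not None and v2 is not None:
        smallest = min(v1, v2)
        yield smallest
        if v1 == smallest:
            v1 = next(it1, None)
        if v2 == smallest:
            v2 = next(it2, None)
    for v in (v1, v2):               # flush the leftover value, if any
        if v is not None:
            yield v
    for v in it1:                    # drain whichever iterator remains
        yield v
    for v in it2:
        yield v

print(list(merge_ascending(iter([0, 2, 3]), iter([2, 4, 5]))))
# [0, 2, 3, 4, 5]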
3185 class addset(abstractsmartset):
3186 class addset(abstractsmartset):
3186 """Represent the addition of two sets
3187 """Represent the addition of two sets
3187
3188
3188 Wrapper structure for lazily adding two structures without losing much
3189 Wrapper structure for lazily adding two structures without losing much
3189 performance on the __contains__ method
3190 performance on the __contains__ method
3190
3191
3191 If the ascending attribute is set, that means the two structures are
3192 If the ascending attribute is set, that means the two structures are
3192 ordered in either an ascending or descending way. Therefore, we can add
3193 ordered in either an ascending or descending way. Therefore, we can add
3193 them maintaining the order by iterating over both at the same time
3194 them maintaining the order by iterating over both at the same time
3194
3195
3195 >>> xs = baseset([0, 3, 2])
3196 >>> xs = baseset([0, 3, 2])
3196 >>> ys = baseset([5, 2, 4])
3197 >>> ys = baseset([5, 2, 4])
3197
3198
3198 >>> rs = addset(xs, ys)
3199 >>> rs = addset(xs, ys)
3199 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3200 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3200 (True, True, False, True, 0, 4)
3201 (True, True, False, True, 0, 4)
3201 >>> rs = addset(xs, baseset([]))
3202 >>> rs = addset(xs, baseset([]))
3202 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3203 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3203 (True, True, False, 0, 2)
3204 (True, True, False, 0, 2)
3204 >>> rs = addset(baseset([]), baseset([]))
3205 >>> rs = addset(baseset([]), baseset([]))
3205 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3206 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3206 (False, False, None, None)
3207 (False, False, None, None)
3207
3208
3208 iterate unsorted:
3209 iterate unsorted:
3209 >>> rs = addset(xs, ys)
3210 >>> rs = addset(xs, ys)
3210 >>> [x for x in rs] # without _genlist
3211 >>> [x for x in rs] # without _genlist
3211 [0, 3, 2, 5, 4]
3212 [0, 3, 2, 5, 4]
3212 >>> assert not rs._genlist
3213 >>> assert not rs._genlist
3213 >>> len(rs)
3214 >>> len(rs)
3214 5
3215 5
3215 >>> [x for x in rs] # with _genlist
3216 >>> [x for x in rs] # with _genlist
3216 [0, 3, 2, 5, 4]
3217 [0, 3, 2, 5, 4]
3217 >>> assert rs._genlist
3218 >>> assert rs._genlist
3218
3219
3219 iterate ascending:
3220 iterate ascending:
3220 >>> rs = addset(xs, ys, ascending=True)
3221 >>> rs = addset(xs, ys, ascending=True)
3221 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3222 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3222 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3223 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3223 >>> assert not rs._asclist
3224 >>> assert not rs._asclist
3224 >>> len(rs)
3225 >>> len(rs)
3225 5
3226 5
3226 >>> [x for x in rs], [x for x in rs.fastasc()]
3227 >>> [x for x in rs], [x for x in rs.fastasc()]
3227 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3228 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3228 >>> assert rs._asclist
3229 >>> assert rs._asclist
3229
3230
3230 iterate descending:
3231 iterate descending:
3231 >>> rs = addset(xs, ys, ascending=False)
3232 >>> rs = addset(xs, ys, ascending=False)
3232 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3233 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3233 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3234 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3234 >>> assert not rs._asclist
3235 >>> assert not rs._asclist
3235 >>> len(rs)
3236 >>> len(rs)
3236 5
3237 5
3237 >>> [x for x in rs], [x for x in rs.fastdesc()]
3238 >>> [x for x in rs], [x for x in rs.fastdesc()]
3238 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3239 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3239 >>> assert rs._asclist
3240 >>> assert rs._asclist
3240
3241
3241 iterate ascending without fastasc:
3242 iterate ascending without fastasc:
3242 >>> rs = addset(xs, generatorset(ys), ascending=True)
3243 >>> rs = addset(xs, generatorset(ys), ascending=True)
3243 >>> assert rs.fastasc is None
3244 >>> assert rs.fastasc is None
3244 >>> [x for x in rs]
3245 >>> [x for x in rs]
3245 [0, 2, 3, 4, 5]
3246 [0, 2, 3, 4, 5]
3246
3247
3247 iterate descending without fastdesc:
3248 iterate descending without fastdesc:
3248 >>> rs = addset(generatorset(xs), ys, ascending=False)
3249 >>> rs = addset(generatorset(xs), ys, ascending=False)
3249 >>> assert rs.fastdesc is None
3250 >>> assert rs.fastdesc is None
3250 >>> [x for x in rs]
3251 >>> [x for x in rs]
3251 [5, 4, 3, 2, 0]
3252 [5, 4, 3, 2, 0]
3252 """
3253 """
3253 def __init__(self, revs1, revs2, ascending=None):
3254 def __init__(self, revs1, revs2, ascending=None):
3254 self._r1 = revs1
3255 self._r1 = revs1
3255 self._r2 = revs2
3256 self._r2 = revs2
3256 self._iter = None
3257 self._iter = None
3257 self._ascending = ascending
3258 self._ascending = ascending
3258 self._genlist = None
3259 self._genlist = None
3259 self._asclist = None
3260 self._asclist = None
3260
3261
3261 def __len__(self):
3262 def __len__(self):
3262 return len(self._list)
3263 return len(self._list)
3263
3264
3264 def __nonzero__(self):
3265 def __nonzero__(self):
3265 return bool(self._r1) or bool(self._r2)
3266 return bool(self._r1) or bool(self._r2)
3266
3267
3267 @util.propertycache
3268 @util.propertycache
3268 def _list(self):
3269 def _list(self):
3269 if not self._genlist:
3270 if not self._genlist:
3270 self._genlist = baseset(iter(self))
3271 self._genlist = baseset(iter(self))
3271 return self._genlist
3272 return self._genlist
3272
3273
3273 def __iter__(self):
3274 def __iter__(self):
3274 """Iterate over both collections without repeating elements
3275 """Iterate over both collections without repeating elements
3275
3276
3276 If the ascending attribute is not set, iterate over the first one and
3277 If the ascending attribute is not set, iterate over the first one and
3277 then over the second one checking for membership on the first one so we
3278 then over the second one checking for membership on the first one so we
3278 don't yield any duplicates.
3279 don't yield any duplicates.
3279
3280
3280 If the ascending attribute is set, iterate over both collections at the
3281 If the ascending attribute is set, iterate over both collections at the
3281 same time, yielding only one value at a time in the given order.
3282 same time, yielding only one value at a time in the given order.
3282 """
3283 """
3283 if self._ascending is None:
3284 if self._ascending is None:
3284 if self._genlist:
3285 if self._genlist:
3285 return iter(self._genlist)
3286 return iter(self._genlist)
3286 def arbitraryordergen():
3287 def arbitraryordergen():
3287 for r in self._r1:
3288 for r in self._r1:
3288 yield r
3289 yield r
3289 inr1 = self._r1.__contains__
3290 inr1 = self._r1.__contains__
3290 for r in self._r2:
3291 for r in self._r2:
3291 if not inr1(r):
3292 if not inr1(r):
3292 yield r
3293 yield r
3293 return arbitraryordergen()
3294 return arbitraryordergen()
3294 # try to use our own fast iterator if it exists
3295 # try to use our own fast iterator if it exists
3295 self._trysetasclist()
3296 self._trysetasclist()
3296 if self._ascending:
3297 if self._ascending:
3297 attr = 'fastasc'
3298 attr = 'fastasc'
3298 else:
3299 else:
3299 attr = 'fastdesc'
3300 attr = 'fastdesc'
3300 it = getattr(self, attr)
3301 it = getattr(self, attr)
3301 if it is not None:
3302 if it is not None:
3302 return it()
3303 return it()
3303 # maybe only one of the two components supports fast iteration
3304 # maybe only one of the two components supports fast iteration
3304 # get iterator for _r1
3305 # get iterator for _r1
3305 iter1 = getattr(self._r1, attr)
3306 iter1 = getattr(self._r1, attr)
3306 if iter1 is None:
3307 if iter1 is None:
3307 # let's avoid side effects (not sure it matters)
3308 # let's avoid side effects (not sure it matters)
3308 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3309 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3309 else:
3310 else:
3310 iter1 = iter1()
3311 iter1 = iter1()
3311 # get iterator for _r2
3312 # get iterator for _r2
3312 iter2 = getattr(self._r2, attr)
3313 iter2 = getattr(self._r2, attr)
3313 if iter2 is None:
3314 if iter2 is None:
3314 # let's avoid side effects (not sure it matters)
3315 # let's avoid side effects (not sure it matters)
3315 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3316 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3316 else:
3317 else:
3317 iter2 = iter2()
3318 iter2 = iter2()
3318 return _iterordered(self._ascending, iter1, iter2)
3319 return _iterordered(self._ascending, iter1, iter2)
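The _iterordered helper called here is defined elsewhere in revset.py and is not part of this hunk. The idea is a plain two-pointer merge of two iterators that are already sorted in the requested direction, emitting values common to both only once. A minimal standalone sketch of that merge (illustration only, not the module's actual helper; it assumes integer revisions, so None is a safe sentinel):

    def merge_ordered(ascending, iter1, iter2):
        # Standalone sketch only -- not the real _iterordered.
        # Both inputs must already be sorted in the same direction.
        pick = min if ascending else max
        val1 = next(iter1, None)
        val2 = next(iter2, None)
        while val1 is not None and val2 is not None:
            if val1 == val2:
                yield val1                      # common value, emit once
                val1 = next(iter1, None)
                val2 = next(iter2, None)
            elif pick(val1, val2) == val1:
                yield val1
                val1 = next(iter1, None)
            else:
                yield val2
                val2 = next(iter2, None)
        # one side is exhausted; drain whatever is left of the other
        leftover = val1 if val1 is not None else val2
        if leftover is not None:
            yield leftover
        for val in (iter1 if val2 is None else iter2):
            yield val

    # list(merge_ordered(True, iter([0, 2, 3]), iter([2, 4, 5])))
    # -> [0, 2, 3, 4, 5]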
3319
3320
3320 def _trysetasclist(self):
3321 def _trysetasclist(self):
3321 """populate the _asclist attribute if possible and necessary"""
3322 """populate the _asclist attribute if possible and necessary"""
3322 if self._genlist is not None and self._asclist is None:
3323 if self._genlist is not None and self._asclist is None:
3323 self._asclist = sorted(self._genlist)
3324 self._asclist = sorted(self._genlist)
3324
3325
3325 @property
3326 @property
3326 def fastasc(self):
3327 def fastasc(self):
3327 self._trysetasclist()
3328 self._trysetasclist()
3328 if self._asclist is not None:
3329 if self._asclist is not None:
3329 return self._asclist.__iter__
3330 return self._asclist.__iter__
3330 iter1 = self._r1.fastasc
3331 iter1 = self._r1.fastasc
3331 iter2 = self._r2.fastasc
3332 iter2 = self._r2.fastasc
3332 if None in (iter1, iter2):
3333 if None in (iter1, iter2):
3333 return None
3334 return None
3334 return lambda: _iterordered(True, iter1(), iter2())
3335 return lambda: _iterordered(True, iter1(), iter2())
3335
3336
3336 @property
3337 @property
3337 def fastdesc(self):
3338 def fastdesc(self):
3338 self._trysetasclist()
3339 self._trysetasclist()
3339 if self._asclist is not None:
3340 if self._asclist is not None:
3340 return self._asclist.__reversed__
3341 return self._asclist.__reversed__
3341 iter1 = self._r1.fastdesc
3342 iter1 = self._r1.fastdesc
3342 iter2 = self._r2.fastdesc
3343 iter2 = self._r2.fastdesc
3343 if None in (iter1, iter2):
3344 if None in (iter1, iter2):
3344 return None
3345 return None
3345 return lambda: _iterordered(False, iter1(), iter2())
3346 return lambda: _iterordered(False, iter1(), iter2())
3346
3347
3347 def __contains__(self, x):
3348 def __contains__(self, x):
3348 return x in self._r1 or x in self._r2
3349 return x in self._r1 or x in self._r2
3349
3350
3350 def sort(self, reverse=False):
3351 def sort(self, reverse=False):
3351 """Sort the added set
3352 """Sort the added set
3352
3353
3353 For this we use the cached list with all the generated values and if we
3354 For this we use the cached list with all the generated values and if we
3354 know they are ascending or descending we can sort them in a smart way.
3355 know they are ascending or descending we can sort them in a smart way.
3355 """
3356 """
3356 self._ascending = not reverse
3357 self._ascending = not reverse
3357
3358
3358 def isascending(self):
3359 def isascending(self):
3359 return self._ascending is not None and self._ascending
3360 return self._ascending is not None and self._ascending
3360
3361
3361 def isdescending(self):
3362 def isdescending(self):
3362 return self._ascending is not None and not self._ascending
3363 return self._ascending is not None and not self._ascending
3363
3364
3364 def reverse(self):
3365 def reverse(self):
3365 if self._ascending is None:
3366 if self._ascending is None:
3366 self._list.reverse()
3367 self._list.reverse()
3367 else:
3368 else:
3368 self._ascending = not self._ascending
3369 self._ascending = not self._ascending
3369
3370
3370 def first(self):
3371 def first(self):
3371 for x in self:
3372 for x in self:
3372 return x
3373 return x
3373 return None
3374 return None
3374
3375
3375 def last(self):
3376 def last(self):
3376 self.reverse()
3377 self.reverse()
3377 val = self.first()
3378 val = self.first()
3378 self.reverse()
3379 self.reverse()
3379 return val
3380 return val
3380
3381
3381 def __repr__(self):
3382 def __repr__(self):
3382 d = {None: '', False: '-', True: '+'}[self._ascending]
3383 d = {None: '', False: '-', True: '+'}[self._ascending]
3383 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3384 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3384
3385
3385 class generatorset(abstractsmartset):
3386 class generatorset(abstractsmartset):
3386 """Wrap a generator for lazy iteration
3387 """Wrap a generator for lazy iteration
3387
3388
3388 Wrapper structure for generators that provides lazy membership and can
3389 Wrapper structure for generators that provides lazy membership and can
3389 be iterated more than once.
3390 be iterated more than once.
3390 When asked for membership it generates values until either it finds the
3391 When asked for membership it generates values until either it finds the
3391 requested one or has gone through all the elements in the generator.
3392 requested one or has gone through all the elements in the generator.
3392 """
3393 """
3393 def __init__(self, gen, iterasc=None):
3394 def __init__(self, gen, iterasc=None):
3394 """
3395 """
3395 gen: a generator producing the values for the generatorset.
3396 gen: a generator producing the values for the generatorset.
3396 """
3397 """
3397 self._gen = gen
3398 self._gen = gen
3398 self._asclist = None
3399 self._asclist = None
3399 self._cache = {}
3400 self._cache = {}
3400 self._genlist = []
3401 self._genlist = []
3401 self._finished = False
3402 self._finished = False
3402 self._ascending = True
3403 self._ascending = True
3403 if iterasc is not None:
3404 if iterasc is not None:
3404 if iterasc:
3405 if iterasc:
3405 self.fastasc = self._iterator
3406 self.fastasc = self._iterator
3406 self.__contains__ = self._asccontains
3407 self.__contains__ = self._asccontains
3407 else:
3408 else:
3408 self.fastdesc = self._iterator
3409 self.fastdesc = self._iterator
3409 self.__contains__ = self._desccontains
3410 self.__contains__ = self._desccontains
3410
3411
3411 def __nonzero__(self):
3412 def __nonzero__(self):
3412 # Do not use 'for r in self' because it will enforce the iteration
3413 # Do not use 'for r in self' because it will enforce the iteration
3413 # order (default ascending), possibly unrolling a whole descending
3414 # order (default ascending), possibly unrolling a whole descending
3414 # iterator.
3415 # iterator.
3415 if self._genlist:
3416 if self._genlist:
3416 return True
3417 return True
3417 for r in self._consumegen():
3418 for r in self._consumegen():
3418 return True
3419 return True
3419 return False
3420 return False
3420
3421
3421 def __contains__(self, x):
3422 def __contains__(self, x):
3422 if x in self._cache:
3423 if x in self._cache:
3423 return self._cache[x]
3424 return self._cache[x]
3424
3425
3425 # Use new values only, as existing values would be cached.
3426 # Use new values only, as existing values would be cached.
3426 for l in self._consumegen():
3427 for l in self._consumegen():
3427 if l == x:
3428 if l == x:
3428 return True
3429 return True
3429
3430
3430 self._cache[x] = False
3431 self._cache[x] = False
3431 return False
3432 return False
3432
3433
3433 def _asccontains(self, x):
3434 def _asccontains(self, x):
3434 """version of contains optimised for ascending generator"""
3435 """version of contains optimised for ascending generator"""
3435 if x in self._cache:
3436 if x in self._cache:
3436 return self._cache[x]
3437 return self._cache[x]
3437
3438
3438 # Use new values only, as existing values would be cached.
3439 # Use new values only, as existing values would be cached.
3439 for l in self._consumegen():
3440 for l in self._consumegen():
3440 if l == x:
3441 if l == x:
3441 return True
3442 return True
3442 if l > x:
3443 if l > x:
3443 break
3444 break
3444
3445
3445 self._cache[x] = False
3446 self._cache[x] = False
3446 return False
3447 return False
3447
3448
3448 def _desccontains(self, x):
3449 def _desccontains(self, x):
3449 """version of contains optimised for descending generator"""
3450 """version of contains optimised for descending generator"""
3450 if x in self._cache:
3451 if x in self._cache:
3451 return self._cache[x]
3452 return self._cache[x]
3452
3453
3453 # Use new values only, as existing values would be cached.
3454 # Use new values only, as existing values would be cached.
3454 for l in self._consumegen():
3455 for l in self._consumegen():
3455 if l == x:
3456 if l == x:
3456 return True
3457 return True
3457 if l < x:
3458 if l < x:
3458 break
3459 break
3459
3460
3460 self._cache[x] = False
3461 self._cache[x] = False
3461 return False
3462 return False
3462
3463
3463 def __iter__(self):
3464 def __iter__(self):
3464 if self._ascending:
3465 if self._ascending:
3465 it = self.fastasc
3466 it = self.fastasc
3466 else:
3467 else:
3467 it = self.fastdesc
3468 it = self.fastdesc
3468 if it is not None:
3469 if it is not None:
3469 return it()
3470 return it()
3470 # we need to consume the iterator
3471 # we need to consume the iterator
3471 for x in self._consumegen():
3472 for x in self._consumegen():
3472 pass
3473 pass
3473 # recall the same code
3474 # recall the same code
3474 return iter(self)
3475 return iter(self)
3475
3476
3476 def _iterator(self):
3477 def _iterator(self):
3477 if self._finished:
3478 if self._finished:
3478 return iter(self._genlist)
3479 return iter(self._genlist)
3479
3480
3480 # We have to use this complex iteration strategy to allow multiple
3481 # We have to use this complex iteration strategy to allow multiple
3481 # iterations at the same time. We need to be able to catch revisions
3482 # iterations at the same time. We need to be able to catch revisions
3482 # removed from _consumegen and added to genlist in another instance.
3483 # removed from _consumegen and added to genlist in another instance.
3483 #
3484 #
3484 # Getting rid of it would provide about a 15% speedup on this
3485 # Getting rid of it would provide about a 15% speedup on this
3485 # iteration.
3486 # iteration.
3486 genlist = self._genlist
3487 genlist = self._genlist
3487 nextrev = self._consumegen().next
3488 nextrev = self._consumegen().next
3488 _len = len # cache global lookup
3489 _len = len # cache global lookup
3489 def gen():
3490 def gen():
3490 i = 0
3491 i = 0
3491 while True:
3492 while True:
3492 if i < _len(genlist):
3493 if i < _len(genlist):
3493 yield genlist[i]
3494 yield genlist[i]
3494 else:
3495 else:
3495 yield nextrev()
3496 yield nextrev()
3496 i += 1
3497 i += 1
3497 return gen()
3498 return gen()
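The effect of the comment above, sketched as usage (assuming a Python 2 era checkout where these classes live in mercurial.revset): two iterators obtained from the same generatorset can be advanced independently, because whichever one runs ahead fills the shared _genlist via _consumegen and the one lagging behind replays it by index.

    from mercurial import revset

    gs = revset.generatorset((r for r in [0, 1, 2, 3]), iterasc=True)
    it1 = gs.fastasc()    # fastasc is _iterator because iterasc=True
    it2 = gs.fastasc()
    print it1.next(), it1.next()   # 0 1 -- pulls from the wrapped generator
    print it2.next()               # 0   -- replayed from the shared _genlist
    print it1.next(), it2.next()   # 2 1 -- each keeps its own position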
3498
3499
3499 def _consumegen(self):
3500 def _consumegen(self):
3500 cache = self._cache
3501 cache = self._cache
3501 genlist = self._genlist.append
3502 genlist = self._genlist.append
3502 for item in self._gen:
3503 for item in self._gen:
3503 cache[item] = True
3504 cache[item] = True
3504 genlist(item)
3505 genlist(item)
3505 yield item
3506 yield item
3506 if not self._finished:
3507 if not self._finished:
3507 self._finished = True
3508 self._finished = True
3508 asc = self._genlist[:]
3509 asc = self._genlist[:]
3509 asc.sort()
3510 asc.sort()
3510 self._asclist = asc
3511 self._asclist = asc
3511 self.fastasc = asc.__iter__
3512 self.fastasc = asc.__iter__
3512 self.fastdesc = asc.__reversed__
3513 self.fastdesc = asc.__reversed__
3513
3514
3514 def __len__(self):
3515 def __len__(self):
3515 for x in self._consumegen():
3516 for x in self._consumegen():
3516 pass
3517 pass
3517 return len(self._genlist)
3518 return len(self._genlist)
3518
3519
3519 def sort(self, reverse=False):
3520 def sort(self, reverse=False):
3520 self._ascending = not reverse
3521 self._ascending = not reverse
3521
3522
3522 def reverse(self):
3523 def reverse(self):
3523 self._ascending = not self._ascending
3524 self._ascending = not self._ascending
3524
3525
3525 def isascending(self):
3526 def isascending(self):
3526 return self._ascending
3527 return self._ascending
3527
3528
3528 def isdescending(self):
3529 def isdescending(self):
3529 return not self._ascending
3530 return not self._ascending
3530
3531
3531 def first(self):
3532 def first(self):
3532 if self._ascending:
3533 if self._ascending:
3533 it = self.fastasc
3534 it = self.fastasc
3534 else:
3535 else:
3535 it = self.fastdesc
3536 it = self.fastdesc
3536 if it is None:
3537 if it is None:
3537 # we need to consume all and try again
3538 # we need to consume all and try again
3538 for x in self._consumegen():
3539 for x in self._consumegen():
3539 pass
3540 pass
3540 return self.first()
3541 return self.first()
3541 return next(it(), None)
3542 return next(it(), None)
3542
3543
3543 def last(self):
3544 def last(self):
3544 if self._ascending:
3545 if self._ascending:
3545 it = self.fastdesc
3546 it = self.fastdesc
3546 else:
3547 else:
3547 it = self.fastasc
3548 it = self.fastasc
3548 if it is None:
3549 if it is None:
3549 # we need to consume all and try again
3550 # we need to consume all and try again
3550 for x in self._consumegen():
3551 for x in self._consumegen():
3551 pass
3552 pass
3552 return self.first()
3553 return self.first()
3553 return next(it(), None)
3554 return next(it(), None)
3554
3555
3555 def __repr__(self):
3556 def __repr__(self):
3556 d = {False: '-', True: '+'}[self._ascending]
3557 d = {False: '-', True: '+'}[self._ascending]
3557 return '<%s%s>' % (type(self).__name__, d)
3558 return '<%s%s>' % (type(self).__name__, d)
3558
3559
3559 class spanset(abstractsmartset):
3560 class spanset(abstractsmartset):
3560 """Duck type for baseset class which represents a range of revisions and
3561 """Duck type for baseset class which represents a range of revisions and
3561 can work lazily and without having all the range in memory
3562 can work lazily and without having all the range in memory
3562
3563
3563 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3564 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3564 notable points:
3565 notable points:
3565 - when y < x it will be automatically descending,
3566 - when y < x it will be automatically descending,
3566 - revisions filtered by this repoview will be skipped.
3567 - revisions filtered by this repoview will be skipped.
3567
3568
3568 """
3569 """
3569 def __init__(self, repo, start=0, end=None):
3570 def __init__(self, repo, start=0, end=None):
3570 """
3571 """
3571 start: first revision included in the set
3572 start: first revision included in the set
3572 (defaults to 0)
3573 (defaults to 0)
3573 end: first revision excluded (last + 1)
3574 end: first revision excluded (last + 1)
3574 (defaults to len(repo))
3575 (defaults to len(repo))
3575
3576
3576 Spanset will be descending if `end` < `start`.
3577 Spanset will be descending if `end` < `start`.
3577 """
3578 """
3578 if end is None:
3579 if end is None:
3579 end = len(repo)
3580 end = len(repo)
3580 self._ascending = start <= end
3581 self._ascending = start <= end
3581 if not self._ascending:
3582 if not self._ascending:
3582 start, end = end + 1, start +1
3583 start, end = end + 1, start +1
3583 self._start = start
3584 self._start = start
3584 self._end = end
3585 self._end = end
3585 self._hiddenrevs = repo.changelog.filteredrevs
3586 self._hiddenrevs = repo.changelog.filteredrevs
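A worked example of the normalization above: when the requested range runs backwards, the set is flagged descending and the bounds are shifted by one so the same half-open interval is stored.

    # Assuming a repo with revisions 0..9 and nothing filtered:
    #   spanset(repo, 5, 1)  ->  start=5 > end=1, so _ascending = False
    #                            and start, end = 1 + 1, 5 + 1 = 2, 6
    #   fastasc()  iterates xrange(2, 6)      -> 2, 3, 4, 5
    #   fastdesc() iterates xrange(5, 1, -1)  -> 5, 4, 3, 2  (the default order here)
    #   len(...)   is abs(6 - 2) = 4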
3586
3587
3587 def sort(self, reverse=False):
3588 def sort(self, reverse=False):
3588 self._ascending = not reverse
3589 self._ascending = not reverse
3589
3590
3590 def reverse(self):
3591 def reverse(self):
3591 self._ascending = not self._ascending
3592 self._ascending = not self._ascending
3592
3593
3593 def _iterfilter(self, iterrange):
3594 def _iterfilter(self, iterrange):
3594 s = self._hiddenrevs
3595 s = self._hiddenrevs
3595 for r in iterrange:
3596 for r in iterrange:
3596 if r not in s:
3597 if r not in s:
3597 yield r
3598 yield r
3598
3599
3599 def __iter__(self):
3600 def __iter__(self):
3600 if self._ascending:
3601 if self._ascending:
3601 return self.fastasc()
3602 return self.fastasc()
3602 else:
3603 else:
3603 return self.fastdesc()
3604 return self.fastdesc()
3604
3605
3605 def fastasc(self):
3606 def fastasc(self):
3606 iterrange = xrange(self._start, self._end)
3607 iterrange = xrange(self._start, self._end)
3607 if self._hiddenrevs:
3608 if self._hiddenrevs:
3608 return self._iterfilter(iterrange)
3609 return self._iterfilter(iterrange)
3609 return iter(iterrange)
3610 return iter(iterrange)
3610
3611
3611 def fastdesc(self):
3612 def fastdesc(self):
3612 iterrange = xrange(self._end - 1, self._start - 1, -1)
3613 iterrange = xrange(self._end - 1, self._start - 1, -1)
3613 if self._hiddenrevs:
3614 if self._hiddenrevs:
3614 return self._iterfilter(iterrange)
3615 return self._iterfilter(iterrange)
3615 return iter(iterrange)
3616 return iter(iterrange)
3616
3617
3617 def __contains__(self, rev):
3618 def __contains__(self, rev):
3618 hidden = self._hiddenrevs
3619 hidden = self._hiddenrevs
3619 return ((self._start <= rev < self._end)
3620 return ((self._start <= rev < self._end)
3620 and not (hidden and rev in hidden))
3621 and not (hidden and rev in hidden))
3621
3622
3622 def __nonzero__(self):
3623 def __nonzero__(self):
3623 for r in self:
3624 for r in self:
3624 return True
3625 return True
3625 return False
3626 return False
3626
3627
3627 def __len__(self):
3628 def __len__(self):
3628 if not self._hiddenrevs:
3629 if not self._hiddenrevs:
3629 return abs(self._end - self._start)
3630 return abs(self._end - self._start)
3630 else:
3631 else:
3631 count = 0
3632 count = 0
3632 start = self._start
3633 start = self._start
3633 end = self._end
3634 end = self._end
3634 for rev in self._hiddenrevs:
3635 for rev in self._hiddenrevs:
3635 if (end < rev <= start) or (start <= rev < end):
3636 if (end < rev <= start) or (start <= rev < end):
3636 count += 1
3637 count += 1
3637 return abs(self._end - self._start) - count
3638 return abs(self._end - self._start) - count
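Continuing that worked example for the hidden-revision branch above:

    # With the spanset(repo, 5, 1) from the earlier example (start=2, end=6)
    # and repo.changelog.filteredrevs = {3, 8}:
    #   only rev 3 falls inside the stored interval (2 <= 3 < 6), so count = 1
    #   len(...)   = abs(6 - 2) - 1 = 3
    #   fastdesc() yields 5, 4, 2 (rev 3 is skipped by _iterfilter)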
3638
3639
3639 def isascending(self):
3640 def isascending(self):
3640 return self._ascending
3641 return self._ascending
3641
3642
3642 def isdescending(self):
3643 def isdescending(self):
3643 return not self._ascending
3644 return not self._ascending
3644
3645
3645 def first(self):
3646 def first(self):
3646 if self._ascending:
3647 if self._ascending:
3647 it = self.fastasc
3648 it = self.fastasc
3648 else:
3649 else:
3649 it = self.fastdesc
3650 it = self.fastdesc
3650 for x in it():
3651 for x in it():
3651 return x
3652 return x
3652 return None
3653 return None
3653
3654
3654 def last(self):
3655 def last(self):
3655 if self._ascending:
3656 if self._ascending:
3656 it = self.fastdesc
3657 it = self.fastdesc
3657 else:
3658 else:
3658 it = self.fastasc
3659 it = self.fastasc
3659 for x in it():
3660 for x in it():
3660 return x
3661 return x
3661 return None
3662 return None
3662
3663
3663 def __repr__(self):
3664 def __repr__(self):
3664 d = {False: '-', True: '+'}[self._ascending]
3665 d = {False: '-', True: '+'}[self._ascending]
3665 return '<%s%s %d:%d>' % (type(self).__name__, d,
3666 return '<%s%s %d:%d>' % (type(self).__name__, d,
3666 self._start, self._end - 1)
3667 self._start, self._end - 1)
3667
3668
3668 class fullreposet(spanset):
3669 class fullreposet(spanset):
3669 """a set containing all revisions in the repo
3670 """a set containing all revisions in the repo
3670
3671
3671 This class exists to host special optimization and magic to handle virtual
3672 This class exists to host special optimization and magic to handle virtual
3672 revisions such as "null".
3673 revisions such as "null".
3673 """
3674 """
3674
3675
3675 def __init__(self, repo):
3676 def __init__(self, repo):
3676 super(fullreposet, self).__init__(repo)
3677 super(fullreposet, self).__init__(repo)
3677
3678
3678 def __and__(self, other):
3679 def __and__(self, other):
3679 """As self contains the whole repo, all of the other set should also be
3680 """As self contains the whole repo, all of the other set should also be
3680 in self. Therefore `self & other = other`.
3681 in self. Therefore `self & other = other`.
3681
3682
3682 This boldly assumes the other contains valid revs only.
3683 This boldly assumes the other contains valid revs only.
3683 """
3684 """
3684 # other is not a smartset, make it so
3685 # other is not a smartset, make it so
3685 if not util.safehasattr(other, 'isascending'):
3686 if not util.safehasattr(other, 'isascending'):
3686 # filter out hidden revisions
3687 # filter out hidden revisions
3687 # (this boldly assumes all smartsets are pure)
3688 # (this boldly assumes all smartsets are pure)
3688 #
3689 #
3689 # `other` was used with "&", let's assume this is a set like
3690 # `other` was used with "&", let's assume this is a set like
3690 # object.
3691 # object.
3691 other = baseset(other - self._hiddenrevs)
3692 other = baseset(other - self._hiddenrevs)
3692
3693
3693 # XXX As fullreposet is also used as bootstrap, this is wrong.
3694 # XXX As fullreposet is also used as bootstrap, this is wrong.
3694 #
3695 #
3695 # With a giveme312() revset returning [3,1,2], this makes
3696 # With a giveme312() revset returning [3,1,2], this makes
3696 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3697 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3697 # We cannot just drop it because other usages still need to sort it:
3698 # We cannot just drop it because other usages still need to sort it:
3698 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3699 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3699 #
3700 #
3700 # There are also some faulty revset implementations that rely on it
3701 # There are also some faulty revset implementations that rely on it
3701 # (eg: children as of its state in e8075329c5fb)
3702 # (eg: children as of its state in e8075329c5fb)
3702 #
3703 #
3703 # When we fix the two points above we can move this into the if clause
3704 # When we fix the two points above we can move this into the if clause
3704 other.sort(reverse=self.isdescending())
3705 other.sort(reverse=self.isdescending())
3705 return other
3706 return other
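An illustration of the identity and the sort side effect discussed in the comments above, using the hypothetical giveme312() revset from the comment (it returns revisions 3, 1, 2):

    # fullreposet(repo) & baseset([3, 1, 2])
    #   -> the right-hand set is returned, but sorted to this set's
    #      direction first, so iteration yields 1, 2, 3
    #
    # Hence 'hg log -r "giveme312()"' shows 1, 2, 3 even though the revset
    # produced 3, 1, 2 (the "wrong" case noted above), while
    # 'hg log -r "all() and giveme312()"' genuinely needs that sort.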
3706
3707
3707 def prettyformatset(revs):
3708 def prettyformatset(revs):
3708 lines = []
3709 lines = []
3709 rs = repr(revs)
3710 rs = repr(revs)
3710 p = 0
3711 p = 0
3711 while p < len(rs):
3712 while p < len(rs):
3712 q = rs.find('<', p + 1)
3713 q = rs.find('<', p + 1)
3713 if q < 0:
3714 if q < 0:
3714 q = len(rs)
3715 q = len(rs)
3715 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3716 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3716 assert l >= 0
3717 assert l >= 0
3717 lines.append((l, rs[p:q].rstrip()))
3718 lines.append((l, rs[p:q].rstrip()))
3718 p = q
3719 p = q
3719 return '\n'.join(' ' * l + s for l, s in lines)
3720 return '\n'.join(' ' * l + s for l, s in lines)
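For example, assuming a nested smartset whose repr comes out as '<addset+ <baseset [0, 3, 2]>, <generatorset+>>' (the addset and generatorset reprs are defined above; the exact baseset form is assumed), prettyformatset splits at each '<' and indents each piece by its nesting depth:

    print prettyformatset(rs)   # rs being the addset described above
    # <addset+
    #  <baseset [0, 3, 2]>,
    #  <generatorset+>>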
3720
3721
3721 # tell hggettext to extract docstrings from these functions:
3722 # tell hggettext to extract docstrings from these functions:
3722 i18nfunctions = symbols.values()
3723 i18nfunctions = symbols.values()
@@ -1,185 +1,185 b''
1 revlog.parseindex must be able to parse the index file even if
1 revlog.parseindex must be able to parse the index file even if
2 an index entry is split between two 64k blocks. The ideal test
2 an index entry is split between two 64k blocks. The ideal test
3 would be to create an index file with inline data where
3 would be to create an index file with inline data where
4 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
4 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
5 the size of an index entry) and with an index entry starting right
5 the size of an index entry) and with an index entry starting right
6 before the 64k block boundary, and try to read it.
6 before the 64k block boundary, and try to read it.
7 We approximate that by reducing the read buffer to 1 byte.
7 We approximate that by reducing the read buffer to 1 byte.
8
8
9 $ hg init a
9 $ hg init a
10 $ cd a
10 $ cd a
11 $ echo abc > foo
11 $ echo abc > foo
12 $ hg add foo
12 $ hg add foo
13 $ hg commit -m 'add foo'
13 $ hg commit -m 'add foo'
14 $ echo >> foo
14 $ echo >> foo
15 $ hg commit -m 'change foo'
15 $ hg commit -m 'change foo'
16 $ hg log -r 0:
16 $ hg log -r 0:
17 changeset: 0:7c31755bf9b5
17 changeset: 0:7c31755bf9b5
18 user: test
18 user: test
19 date: Thu Jan 01 00:00:00 1970 +0000
19 date: Thu Jan 01 00:00:00 1970 +0000
20 summary: add foo
20 summary: add foo
21
21
22 changeset: 1:26333235a41c
22 changeset: 1:26333235a41c
23 tag: tip
23 tag: tip
24 user: test
24 user: test
25 date: Thu Jan 01 00:00:00 1970 +0000
25 date: Thu Jan 01 00:00:00 1970 +0000
26 summary: change foo
26 summary: change foo
27
27
28 $ cat >> test.py << EOF
28 $ cat >> test.py << EOF
29 > from mercurial import changelog, scmutil
29 > from mercurial import changelog, scmutil
30 > from mercurial.node import *
30 > from mercurial.node import *
31 >
31 >
32 > class singlebyteread(object):
32 > class singlebyteread(object):
33 > def __init__(self, real):
33 > def __init__(self, real):
34 > self.real = real
34 > self.real = real
35 >
35 >
36 > def read(self, size=-1):
36 > def read(self, size=-1):
37 > if size == 65536:
37 > if size == 65536:
38 > size = 1
38 > size = 1
39 > return self.real.read(size)
39 > return self.real.read(size)
40 >
40 >
41 > def __getattr__(self, key):
41 > def __getattr__(self, key):
42 > return getattr(self.real, key)
42 > return getattr(self.real, key)
43 >
43 >
44 > def opener(*args):
44 > def opener(*args):
45 > o = scmutil.opener(*args)
45 > o = scmutil.opener(*args)
46 > def wrapper(*a):
46 > def wrapper(*a):
47 > f = o(*a)
47 > f = o(*a)
48 > return singlebyteread(f)
48 > return singlebyteread(f)
49 > return wrapper
49 > return wrapper
50 >
50 >
51 > cl = changelog.changelog(opener('.hg/store'))
51 > cl = changelog.changelog(opener('.hg/store'))
52 > print len(cl), 'revisions:'
52 > print len(cl), 'revisions:'
53 > for r in cl:
53 > for r in cl:
54 > print short(cl.node(r))
54 > print short(cl.node(r))
55 > EOF
55 > EOF
56 $ python test.py
56 $ python test.py
57 2 revisions:
57 2 revisions:
58 7c31755bf9b5
58 7c31755bf9b5
59 26333235a41c
59 26333235a41c
60
60
61 $ cd ..
61 $ cd ..
62
62
63 #if no-pure
63 #if no-pure
64
64
65 Test SEGV caused by bad revision passed to reachableroots() (issue4775):
65 Test SEGV caused by bad revision passed to reachableroots() (issue4775):
66
66
67 $ cd a
67 $ cd a
68
68
69 $ python <<EOF
69 $ python <<EOF
70 > from mercurial import changelog, scmutil
70 > from mercurial import changelog, scmutil
71 > cl = changelog.changelog(scmutil.vfs('.hg/store'))
71 > cl = changelog.changelog(scmutil.vfs('.hg/store'))
72 > print 'good heads:'
72 > print 'good heads:'
73 > for head in [0, len(cl) - 1, -1]:
73 > for head in [0, len(cl) - 1, -1]:
74 > print'%s: %r' % (head, cl.reachableroots(0, [head], [0]))
74 > print'%s: %r' % (head, cl.reachableroots(0, [head], [0]))
75 > print 'bad heads:'
75 > print 'bad heads:'
76 > for head in [len(cl), 10000, -2, -10000, None]:
76 > for head in [len(cl), 10000, -2, -10000, None]:
77 > print '%s:' % head,
77 > print '%s:' % head,
78 > try:
78 > try:
79 > cl.reachableroots(0, [head], [0])
79 > cl.reachableroots(0, [head], [0])
80 > print 'uncaught buffer overflow?'
80 > print 'uncaught buffer overflow?'
81 > except (IndexError, TypeError) as inst:
81 > except (IndexError, TypeError) as inst:
82 > print inst
82 > print inst
83 > print 'good roots:'
83 > print 'good roots:'
84 > for root in [0, len(cl) - 1, -1]:
84 > for root in [0, len(cl) - 1, -1]:
85 > print '%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root]))
85 > print '%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root]))
86 > print 'out-of-range roots are ignored:'
86 > print 'out-of-range roots are ignored:'
87 > for root in [len(cl), 10000, -2, -10000]:
87 > for root in [len(cl), 10000, -2, -10000]:
88 > print '%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root]))
88 > print '%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root]))
89 > print 'bad roots:'
89 > print 'bad roots:'
90 > for root in [None]:
90 > for root in [None]:
91 > print '%s:' % root,
91 > print '%s:' % root,
92 > try:
92 > try:
93 > cl.reachableroots(root, [len(cl) - 1], [root])
93 > cl.reachableroots(root, [len(cl) - 1], [root])
94 > print 'uncaught error?'
94 > print 'uncaught error?'
95 > except TypeError as inst:
95 > except TypeError as inst:
96 > print inst
96 > print inst
97 > EOF
97 > EOF
98 good heads:
98 good heads:
99 0: <baseset+ [0]>
99 0: [0]
100 1: <baseset+ [0]>
100 1: [0]
101 -1: <baseset+ []>
101 -1: []
102 bad heads:
102 bad heads:
103 2: head out of range
103 2: head out of range
104 10000: head out of range
104 10000: head out of range
105 -2: head out of range
105 -2: head out of range
106 -10000: head out of range
106 -10000: head out of range
107 None: an integer is required
107 None: an integer is required
108 good roots:
108 good roots:
109 0: <baseset+ [0]>
109 0: [0]
110 1: <baseset+ [1]>
110 1: [1]
111 -1: <baseset+ [-1]>
111 -1: [-1]
112 out-of-range roots are ignored:
112 out-of-range roots are ignored:
113 2: <baseset+ []>
113 2: []
114 10000: <baseset+ []>
114 10000: []
115 -2: <baseset+ []>
115 -2: []
116 -10000: <baseset+ []>
116 -10000: []
117 bad roots:
117 bad roots:
118 None: an integer is required
118 None: an integer is required
119
119
120 $ cd ..
120 $ cd ..
121
121
122 Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
122 Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
123
123
124 $ mkdir invalidparent
124 $ mkdir invalidparent
125 $ cd invalidparent
125 $ cd invalidparent
126
126
127 $ hg clone --pull -q --config phases.publish=False ../a limit
127 $ hg clone --pull -q --config phases.publish=False ../a limit
128 $ hg clone --pull -q --config phases.publish=False ../a segv
128 $ hg clone --pull -q --config phases.publish=False ../a segv
129 $ rm -R limit/.hg/cache segv/.hg/cache
129 $ rm -R limit/.hg/cache segv/.hg/cache
130
130
131 $ python <<EOF
131 $ python <<EOF
132 > data = open("limit/.hg/store/00changelog.i", "rb").read()
132 > data = open("limit/.hg/store/00changelog.i", "rb").read()
133 > for n, p in [('limit', '\0\0\0\x02'), ('segv', '\0\x01\0\0')]:
133 > for n, p in [('limit', '\0\0\0\x02'), ('segv', '\0\x01\0\0')]:
134 > # corrupt p1 at rev0 and p2 at rev1
134 > # corrupt p1 at rev0 and p2 at rev1
135 > d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
135 > d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
136 > open(n + "/.hg/store/00changelog.i", "wb").write(d)
136 > open(n + "/.hg/store/00changelog.i", "wb").write(d)
137 > EOF
137 > EOF
138
138
139 $ hg debugindex -f1 limit/.hg/store/00changelog.i
139 $ hg debugindex -f1 limit/.hg/store/00changelog.i
140 rev flag offset length size base link p1 p2 nodeid
140 rev flag offset length size base link p1 p2 nodeid
141 0 0000 0 63 62 0 0 2 -1 7c31755bf9b5
141 0 0000 0 63 62 0 0 2 -1 7c31755bf9b5
142 1 0000 63 66 65 1 1 0 2 26333235a41c
142 1 0000 63 66 65 1 1 0 2 26333235a41c
143 $ hg debugindex -f1 segv/.hg/store/00changelog.i
143 $ hg debugindex -f1 segv/.hg/store/00changelog.i
144 rev flag offset length size base link p1 p2 nodeid
144 rev flag offset length size base link p1 p2 nodeid
145 0 0000 0 63 62 0 0 65536 -1 7c31755bf9b5
145 0 0000 0 63 62 0 0 65536 -1 7c31755bf9b5
146 1 0000 63 66 65 1 1 0 65536 26333235a41c
146 1 0000 63 66 65 1 1 0 65536 26333235a41c
147
147
148 $ cat <<EOF > test.py
148 $ cat <<EOF > test.py
149 > import sys
149 > import sys
150 > from mercurial import changelog, scmutil
150 > from mercurial import changelog, scmutil
151 > cl = changelog.changelog(scmutil.vfs(sys.argv[1]))
151 > cl = changelog.changelog(scmutil.vfs(sys.argv[1]))
152 > n0, n1 = cl.node(0), cl.node(1)
152 > n0, n1 = cl.node(0), cl.node(1)
153 > ops = [
153 > ops = [
154 > ('reachableroots',
154 > ('reachableroots',
155 > lambda: cl.index.reachableroots2(0, [1], [0], False)),
155 > lambda: cl.index.reachableroots2(0, [1], [0], False)),
156 > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
156 > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
157 > ('index_headrevs', lambda: cl.headrevs()),
157 > ('index_headrevs', lambda: cl.headrevs()),
158 > ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
158 > ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
159 > ('find_deepest', lambda: cl.ancestor(n0, n1)),
159 > ('find_deepest', lambda: cl.ancestor(n0, n1)),
160 > ]
160 > ]
161 > for l, f in ops:
161 > for l, f in ops:
162 > print l + ':',
162 > print l + ':',
163 > try:
163 > try:
164 > f()
164 > f()
165 > print 'uncaught buffer overflow?'
165 > print 'uncaught buffer overflow?'
166 > except ValueError, inst:
166 > except ValueError, inst:
167 > print inst
167 > print inst
168 > EOF
168 > EOF
169
169
170 $ python test.py limit/.hg/store
170 $ python test.py limit/.hg/store
171 reachableroots: parent out of range
171 reachableroots: parent out of range
172 compute_phases_map_sets: parent out of range
172 compute_phases_map_sets: parent out of range
173 index_headrevs: parent out of range
173 index_headrevs: parent out of range
174 find_gca_candidates: parent out of range
174 find_gca_candidates: parent out of range
175 find_deepest: parent out of range
175 find_deepest: parent out of range
176 $ python test.py segv/.hg/store
176 $ python test.py segv/.hg/store
177 reachableroots: parent out of range
177 reachableroots: parent out of range
178 compute_phases_map_sets: parent out of range
178 compute_phases_map_sets: parent out of range
179 index_headrevs: parent out of range
179 index_headrevs: parent out of range
180 find_gca_candidates: parent out of range
180 find_gca_candidates: parent out of range
181 find_deepest: parent out of range
181 find_deepest: parent out of range
182
182
183 $ cd ..
183 $ cd ..
184
184
185 #endif
185 #endif