revisionbranchcache: fall back to slow path if starting readonly (issue4531)...
Mads Kiilerich
r24159:5b4ed033 3.3.1 stable
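For orientation, the change below gives revbranchcache a readonly mode: a caller that cannot persist the cache no longer tries to use or populate it and instead answers every query from the changelog. The following is a minimal standalone sketch of that fallback pattern; the class and attribute names are illustrative only, not Mercurial's actual API.

# Illustrative sketch only -- simplified names, not Mercurial's API.
class readonlyfallbackcache(object):
    def __init__(self, compute, readonly=True):
        self._compute = compute   # slow source of truth (think changelog.branchinfo)
        self._records = {}        # stands in for the rbc-revs records
        if readonly:
            # can't persist the cache anyway - always take the slow path
            self.branchinfo = self._branchinfo

    def branchinfo(self, rev):
        # fast path: serve from the in-memory records when present
        if rev in self._records:
            return self._records[rev]
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        # slow path: recompute and remember the answer for later writing
        self._records[rev] = self._compute(rev)
        return self._records[rev]

# usage: with readonly=True every lookup goes through _branchinfo
cache = readonlyfallbackcache(lambda rev: ('default', False), readonly=True)
assert cache.branchinfo(0) == ('default', False)
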
@@ -1,451 +1,455 @@
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev
8 from node import bin, hex, nullid, nullrev
9 import encoding
9 import encoding
10 import util
10 import util
11 import time
11 import time
12 from array import array
12 from array import array
13 from struct import calcsize, pack, unpack
13 from struct import calcsize, pack, unpack
14
14
15 def _filename(repo):
15 def _filename(repo):
16 """name of a branchcache file for a given repo or repoview"""
16 """name of a branchcache file for a given repo or repoview"""
17 filename = "cache/branch2"
17 filename = "cache/branch2"
18 if repo.filtername:
18 if repo.filtername:
19 filename = '%s-%s' % (filename, repo.filtername)
19 filename = '%s-%s' % (filename, repo.filtername)
20 return filename
20 return filename
21
21
22 def read(repo):
22 def read(repo):
23 try:
23 try:
24 f = repo.vfs(_filename(repo))
24 f = repo.vfs(_filename(repo))
25 lines = f.read().split('\n')
25 lines = f.read().split('\n')
26 f.close()
26 f.close()
27 except (IOError, OSError):
27 except (IOError, OSError):
28 return None
28 return None
29
29
30 try:
30 try:
31 cachekey = lines.pop(0).split(" ", 2)
31 cachekey = lines.pop(0).split(" ", 2)
32 last, lrev = cachekey[:2]
32 last, lrev = cachekey[:2]
33 last, lrev = bin(last), int(lrev)
33 last, lrev = bin(last), int(lrev)
34 filteredhash = None
34 filteredhash = None
35 if len(cachekey) > 2:
35 if len(cachekey) > 2:
36 filteredhash = bin(cachekey[2])
36 filteredhash = bin(cachekey[2])
37 partial = branchcache(tipnode=last, tiprev=lrev,
37 partial = branchcache(tipnode=last, tiprev=lrev,
38 filteredhash=filteredhash)
38 filteredhash=filteredhash)
39 if not partial.validfor(repo):
39 if not partial.validfor(repo):
40 # invalidate the cache
40 # invalidate the cache
41 raise ValueError('tip differs')
41 raise ValueError('tip differs')
42 for l in lines:
42 for l in lines:
43 if not l:
43 if not l:
44 continue
44 continue
45 node, state, label = l.split(" ", 2)
45 node, state, label = l.split(" ", 2)
46 if state not in 'oc':
46 if state not in 'oc':
47 raise ValueError('invalid branch state')
47 raise ValueError('invalid branch state')
48 label = encoding.tolocal(label.strip())
48 label = encoding.tolocal(label.strip())
49 if not node in repo:
49 if not node in repo:
50 raise ValueError('node %s does not exist' % node)
50 raise ValueError('node %s does not exist' % node)
51 node = bin(node)
51 node = bin(node)
52 partial.setdefault(label, []).append(node)
52 partial.setdefault(label, []).append(node)
53 if state == 'c':
53 if state == 'c':
54 partial._closednodes.add(node)
54 partial._closednodes.add(node)
55 except KeyboardInterrupt:
55 except KeyboardInterrupt:
56 raise
56 raise
57 except Exception, inst:
57 except Exception, inst:
58 if repo.ui.debugflag:
58 if repo.ui.debugflag:
59 msg = 'invalid branchheads cache'
59 msg = 'invalid branchheads cache'
60 if repo.filtername is not None:
60 if repo.filtername is not None:
61 msg += ' (%s)' % repo.filtername
61 msg += ' (%s)' % repo.filtername
62 msg += ': %s\n'
62 msg += ': %s\n'
63 repo.ui.debug(msg % inst)
63 repo.ui.debug(msg % inst)
64 partial = None
64 partial = None
65 return partial
65 return partial
66
66
67 ### Nearest subset relation
67 ### Nearest subset relation
68 # Nearest subset of filter X is a filter Y so that:
68 # Nearest subset of filter X is a filter Y so that:
69 # * Y is included in X,
69 # * Y is included in X,
70 # * X - Y is as small as possible.
70 # * X - Y is as small as possible.
71 # This create and ordering used for branchmap purpose.
71 # This create and ordering used for branchmap purpose.
72 # the ordering may be partial
72 # the ordering may be partial
73 subsettable = {None: 'visible',
73 subsettable = {None: 'visible',
74 'visible': 'served',
74 'visible': 'served',
75 'served': 'immutable',
75 'served': 'immutable',
76 'immutable': 'base'}
76 'immutable': 'base'}
77
77
78 def updatecache(repo):
78 def updatecache(repo):
79 cl = repo.changelog
79 cl = repo.changelog
80 filtername = repo.filtername
80 filtername = repo.filtername
81 partial = repo._branchcaches.get(filtername)
81 partial = repo._branchcaches.get(filtername)
82
82
83 revs = []
83 revs = []
84 if partial is None or not partial.validfor(repo):
84 if partial is None or not partial.validfor(repo):
85 partial = read(repo)
85 partial = read(repo)
86 if partial is None:
86 if partial is None:
87 subsetname = subsettable.get(filtername)
87 subsetname = subsettable.get(filtername)
88 if subsetname is None:
88 if subsetname is None:
89 partial = branchcache()
89 partial = branchcache()
90 else:
90 else:
91 subset = repo.filtered(subsetname)
91 subset = repo.filtered(subsetname)
92 partial = subset.branchmap().copy()
92 partial = subset.branchmap().copy()
93 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
93 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
94 revs.extend(r for r in extrarevs if r <= partial.tiprev)
94 revs.extend(r for r in extrarevs if r <= partial.tiprev)
95 revs.extend(cl.revs(start=partial.tiprev + 1))
95 revs.extend(cl.revs(start=partial.tiprev + 1))
96 if revs:
96 if revs:
97 partial.update(repo, revs)
97 partial.update(repo, revs)
98 partial.write(repo)
98 partial.write(repo)
99 assert partial.validfor(repo), filtername
99 assert partial.validfor(repo), filtername
100 repo._branchcaches[repo.filtername] = partial
100 repo._branchcaches[repo.filtername] = partial
101
101
102 class branchcache(dict):
102 class branchcache(dict):
103 """A dict like object that hold branches heads cache.
103 """A dict like object that hold branches heads cache.
104
104
105 This cache is used to avoid costly computations to determine all the
105 This cache is used to avoid costly computations to determine all the
106 branch heads of a repo.
106 branch heads of a repo.
107
107
108 The cache is serialized on disk in the following format:
108 The cache is serialized on disk in the following format:
109
109
110 <tip hex node> <tip rev number> [optional filtered repo hex hash]
110 <tip hex node> <tip rev number> [optional filtered repo hex hash]
111 <branch head hex node> <open/closed state> <branch name>
111 <branch head hex node> <open/closed state> <branch name>
112 <branch head hex node> <open/closed state> <branch name>
112 <branch head hex node> <open/closed state> <branch name>
113 ...
113 ...
114
114
115 The first line is used to check if the cache is still valid. If the
115 The first line is used to check if the cache is still valid. If the
116 branch cache is for a filtered repo view, an optional third hash is
116 branch cache is for a filtered repo view, an optional third hash is
117 included that hashes the hashes of all filtered revisions.
117 included that hashes the hashes of all filtered revisions.
118
118
119 The open/closed state is represented by a single letter 'o' or 'c'.
119 The open/closed state is represented by a single letter 'o' or 'c'.
120 This field can be used to avoid changelog reads when determining if a
120 This field can be used to avoid changelog reads when determining if a
121 branch head closes a branch or not.
121 branch head closes a branch or not.
122 """
122 """
123
123
124 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
124 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
125 filteredhash=None, closednodes=None):
125 filteredhash=None, closednodes=None):
126 super(branchcache, self).__init__(entries)
126 super(branchcache, self).__init__(entries)
127 self.tipnode = tipnode
127 self.tipnode = tipnode
128 self.tiprev = tiprev
128 self.tiprev = tiprev
129 self.filteredhash = filteredhash
129 self.filteredhash = filteredhash
130 # closednodes is a set of nodes that close their branch. If the branch
130 # closednodes is a set of nodes that close their branch. If the branch
131 # cache has been updated, it may contain nodes that are no longer
131 # cache has been updated, it may contain nodes that are no longer
132 # heads.
132 # heads.
133 if closednodes is None:
133 if closednodes is None:
134 self._closednodes = set()
134 self._closednodes = set()
135 else:
135 else:
136 self._closednodes = closednodes
136 self._closednodes = closednodes
137 self._revbranchcache = None
137 self._revbranchcache = None
138
138
139 def _hashfiltered(self, repo):
139 def _hashfiltered(self, repo):
140 """build hash of revision filtered in the current cache
140 """build hash of revision filtered in the current cache
141
141
142 Tracking tipnode and tiprev is not enough to ensure validity of the
142 Tracking tipnode and tiprev is not enough to ensure validity of the
143 cache as they do not help to distinct cache that ignored various
143 cache as they do not help to distinct cache that ignored various
144 revision bellow tiprev.
144 revision bellow tiprev.
145
145
146 To detect such difference, we build a cache of all ignored revisions.
146 To detect such difference, we build a cache of all ignored revisions.
147 """
147 """
148 cl = repo.changelog
148 cl = repo.changelog
149 if not cl.filteredrevs:
149 if not cl.filteredrevs:
150 return None
150 return None
151 key = None
151 key = None
152 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
152 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
153 if revs:
153 if revs:
154 s = util.sha1()
154 s = util.sha1()
155 for rev in revs:
155 for rev in revs:
156 s.update('%s;' % rev)
156 s.update('%s;' % rev)
157 key = s.digest()
157 key = s.digest()
158 return key
158 return key
159
159
160 def validfor(self, repo):
160 def validfor(self, repo):
161 """Is the cache content valid regarding a repo
161 """Is the cache content valid regarding a repo
162
162
163 - False when cached tipnode is unknown or if we detect a strip.
163 - False when cached tipnode is unknown or if we detect a strip.
164 - True when cache is up to date or a subset of current repo."""
164 - True when cache is up to date or a subset of current repo."""
165 try:
165 try:
166 return ((self.tipnode == repo.changelog.node(self.tiprev))
166 return ((self.tipnode == repo.changelog.node(self.tiprev))
167 and (self.filteredhash == self._hashfiltered(repo)))
167 and (self.filteredhash == self._hashfiltered(repo)))
168 except IndexError:
168 except IndexError:
169 return False
169 return False
170
170
171 def _branchtip(self, heads):
171 def _branchtip(self, heads):
172 '''Return tuple with last open head in heads and false,
172 '''Return tuple with last open head in heads and false,
173 otherwise return last closed head and true.'''
173 otherwise return last closed head and true.'''
174 tip = heads[-1]
174 tip = heads[-1]
175 closed = True
175 closed = True
176 for h in reversed(heads):
176 for h in reversed(heads):
177 if h not in self._closednodes:
177 if h not in self._closednodes:
178 tip = h
178 tip = h
179 closed = False
179 closed = False
180 break
180 break
181 return tip, closed
181 return tip, closed
182
182
183 def branchtip(self, branch):
183 def branchtip(self, branch):
184 '''Return the tipmost open head on branch head, otherwise return the
184 '''Return the tipmost open head on branch head, otherwise return the
185 tipmost closed head on branch.
185 tipmost closed head on branch.
186 Raise KeyError for unknown branch.'''
186 Raise KeyError for unknown branch.'''
187 return self._branchtip(self[branch])[0]
187 return self._branchtip(self[branch])[0]
188
188
189 def branchheads(self, branch, closed=False):
189 def branchheads(self, branch, closed=False):
190 heads = self[branch]
190 heads = self[branch]
191 if not closed:
191 if not closed:
192 heads = [h for h in heads if h not in self._closednodes]
192 heads = [h for h in heads if h not in self._closednodes]
193 return heads
193 return heads
194
194
195 def iterbranches(self):
195 def iterbranches(self):
196 for bn, heads in self.iteritems():
196 for bn, heads in self.iteritems():
197 yield (bn, heads) + self._branchtip(heads)
197 yield (bn, heads) + self._branchtip(heads)
198
198
199 def copy(self):
199 def copy(self):
200 """return an deep copy of the branchcache object"""
200 """return an deep copy of the branchcache object"""
201 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
201 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
202 self._closednodes)
202 self._closednodes)
203
203
204 def write(self, repo):
204 def write(self, repo):
205 try:
205 try:
206 f = repo.vfs(_filename(repo), "w", atomictemp=True)
206 f = repo.vfs(_filename(repo), "w", atomictemp=True)
207 cachekey = [hex(self.tipnode), str(self.tiprev)]
207 cachekey = [hex(self.tipnode), str(self.tiprev)]
208 if self.filteredhash is not None:
208 if self.filteredhash is not None:
209 cachekey.append(hex(self.filteredhash))
209 cachekey.append(hex(self.filteredhash))
210 f.write(" ".join(cachekey) + '\n')
210 f.write(" ".join(cachekey) + '\n')
211 nodecount = 0
211 nodecount = 0
212 for label, nodes in sorted(self.iteritems()):
212 for label, nodes in sorted(self.iteritems()):
213 for node in nodes:
213 for node in nodes:
214 nodecount += 1
214 nodecount += 1
215 if node in self._closednodes:
215 if node in self._closednodes:
216 state = 'c'
216 state = 'c'
217 else:
217 else:
218 state = 'o'
218 state = 'o'
219 f.write("%s %s %s\n" % (hex(node), state,
219 f.write("%s %s %s\n" % (hex(node), state,
220 encoding.fromlocal(label)))
220 encoding.fromlocal(label)))
221 f.close()
221 f.close()
222 repo.ui.log('branchcache',
222 repo.ui.log('branchcache',
223 'wrote %s branch cache with %d labels and %d nodes\n',
223 'wrote %s branch cache with %d labels and %d nodes\n',
224 repo.filtername, len(self), nodecount)
224 repo.filtername, len(self), nodecount)
225 except (IOError, OSError, util.Abort), inst:
225 except (IOError, OSError, util.Abort), inst:
226 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
226 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
227 # Abort may be raise by read only opener
227 # Abort may be raise by read only opener
228 pass
228 pass
229 if self._revbranchcache:
229 if self._revbranchcache:
230 self._revbranchcache.write(repo.unfiltered())
230 self._revbranchcache.write(repo.unfiltered())
231 self._revbranchcache = None
231 self._revbranchcache = None
232
232
233 def update(self, repo, revgen):
233 def update(self, repo, revgen):
234 """Given a branchhead cache, self, that may have extra nodes or be
234 """Given a branchhead cache, self, that may have extra nodes or be
235 missing heads, and a generator of nodes that are strictly a superset of
235 missing heads, and a generator of nodes that are strictly a superset of
236 heads missing, this function updates self to be correct.
236 heads missing, this function updates self to be correct.
237 """
237 """
238 starttime = time.time()
238 starttime = time.time()
239 cl = repo.changelog
239 cl = repo.changelog
240 # collect new branch entries
240 # collect new branch entries
241 newbranches = {}
241 newbranches = {}
242 urepo = repo.unfiltered()
242 urepo = repo.unfiltered()
243 self._revbranchcache = revbranchcache(urepo)
243 self._revbranchcache = revbranchcache(urepo)
244 getbranchinfo = self._revbranchcache.branchinfo
244 getbranchinfo = self._revbranchcache.branchinfo
245 ucl = urepo.changelog
245 ucl = urepo.changelog
246 for r in revgen:
246 for r in revgen:
247 branch, closesbranch = getbranchinfo(ucl, r)
247 branch, closesbranch = getbranchinfo(ucl, r)
248 newbranches.setdefault(branch, []).append(r)
248 newbranches.setdefault(branch, []).append(r)
249 if closesbranch:
249 if closesbranch:
250 self._closednodes.add(cl.node(r))
250 self._closednodes.add(cl.node(r))
251
251
252 # fetch current topological heads to speed up filtering
252 # fetch current topological heads to speed up filtering
253 topoheads = set(cl.headrevs())
253 topoheads = set(cl.headrevs())
254
254
255 # if older branchheads are reachable from new ones, they aren't
255 # if older branchheads are reachable from new ones, they aren't
256 # really branchheads. Note checking parents is insufficient:
256 # really branchheads. Note checking parents is insufficient:
257 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
257 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
258 for branch, newheadrevs in newbranches.iteritems():
258 for branch, newheadrevs in newbranches.iteritems():
259 bheads = self.setdefault(branch, [])
259 bheads = self.setdefault(branch, [])
260 bheadset = set(cl.rev(node) for node in bheads)
260 bheadset = set(cl.rev(node) for node in bheads)
261
261
262 # This have been tested True on all internal usage of this function.
262 # This have been tested True on all internal usage of this function.
263 # run it again in case of doubt
263 # run it again in case of doubt
264 # assert not (set(bheadrevs) & set(newheadrevs))
264 # assert not (set(bheadrevs) & set(newheadrevs))
265 newheadrevs.sort()
265 newheadrevs.sort()
266 bheadset.update(newheadrevs)
266 bheadset.update(newheadrevs)
267
267
268 # This prunes out two kinds of heads - heads that are superseded by
268 # This prunes out two kinds of heads - heads that are superseded by
269 # a head in newheadrevs, and newheadrevs that are not heads because
269 # a head in newheadrevs, and newheadrevs that are not heads because
270 # an existing head is their descendant.
270 # an existing head is their descendant.
271 uncertain = bheadset - topoheads
271 uncertain = bheadset - topoheads
272 if uncertain:
272 if uncertain:
273 floorrev = min(uncertain)
273 floorrev = min(uncertain)
274 ancestors = set(cl.ancestors(newheadrevs, floorrev))
274 ancestors = set(cl.ancestors(newheadrevs, floorrev))
275 bheadset -= ancestors
275 bheadset -= ancestors
276 bheadrevs = sorted(bheadset)
276 bheadrevs = sorted(bheadset)
277 self[branch] = [cl.node(rev) for rev in bheadrevs]
277 self[branch] = [cl.node(rev) for rev in bheadrevs]
278 tiprev = bheadrevs[-1]
278 tiprev = bheadrevs[-1]
279 if tiprev > self.tiprev:
279 if tiprev > self.tiprev:
280 self.tipnode = cl.node(tiprev)
280 self.tipnode = cl.node(tiprev)
281 self.tiprev = tiprev
281 self.tiprev = tiprev
282
282
283 if not self.validfor(repo):
283 if not self.validfor(repo):
284 # cache key are not valid anymore
284 # cache key are not valid anymore
285 self.tipnode = nullid
285 self.tipnode = nullid
286 self.tiprev = nullrev
286 self.tiprev = nullrev
287 for heads in self.values():
287 for heads in self.values():
288 tiprev = max(cl.rev(node) for node in heads)
288 tiprev = max(cl.rev(node) for node in heads)
289 if tiprev > self.tiprev:
289 if tiprev > self.tiprev:
290 self.tipnode = cl.node(tiprev)
290 self.tipnode = cl.node(tiprev)
291 self.tiprev = tiprev
291 self.tiprev = tiprev
292 self.filteredhash = self._hashfiltered(repo)
292 self.filteredhash = self._hashfiltered(repo)
293
293
294 duration = time.time() - starttime
294 duration = time.time() - starttime
295 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
295 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
296 repo.filtername, duration)
296 repo.filtername, duration)
297
297
298 # Revision branch info cache
298 # Revision branch info cache
299
299
300 _rbcversion = '-v1'
300 _rbcversion = '-v1'
301 _rbcnames = 'cache/rbc-names' + _rbcversion
301 _rbcnames = 'cache/rbc-names' + _rbcversion
302 _rbcrevs = 'cache/rbc-revs' + _rbcversion
302 _rbcrevs = 'cache/rbc-revs' + _rbcversion
303 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
303 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
304 _rbcrecfmt = '>4sI'
304 _rbcrecfmt = '>4sI'
305 _rbcrecsize = calcsize(_rbcrecfmt)
305 _rbcrecsize = calcsize(_rbcrecfmt)
306 _rbcnodelen = 4
306 _rbcnodelen = 4
307 _rbcbranchidxmask = 0x7fffffff
307 _rbcbranchidxmask = 0x7fffffff
308 _rbccloseflag = 0x80000000
308 _rbccloseflag = 0x80000000
309
309
310 class revbranchcache(object):
310 class revbranchcache(object):
311 """Persistent cache, mapping from revision number to branch name and close.
311 """Persistent cache, mapping from revision number to branch name and close.
312 This is a low level cache, independent of filtering.
312 This is a low level cache, independent of filtering.
313
313
314 Branch names are stored in rbc-names in internal encoding separated by 0.
314 Branch names are stored in rbc-names in internal encoding separated by 0.
315 rbc-names is append-only, and each branch name is only stored once and will
315 rbc-names is append-only, and each branch name is only stored once and will
316 thus have a unique index.
316 thus have a unique index.
317
317
318 The branch info for each revision is stored in rbc-revs as constant size
318 The branch info for each revision is stored in rbc-revs as constant size
319 records. The whole file is read into memory, but it is only 'parsed' on
319 records. The whole file is read into memory, but it is only 'parsed' on
320 demand. The file is usually append-only but will be truncated if repo
320 demand. The file is usually append-only but will be truncated if repo
321 modification is detected.
321 modification is detected.
322 The record for each revision contains the first 4 bytes of the
322 The record for each revision contains the first 4 bytes of the
323 corresponding node hash, and the record is only used if it still matches.
323 corresponding node hash, and the record is only used if it still matches.
324 Even a completely trashed rbc-revs fill thus still give the right result
324 Even a completely trashed rbc-revs fill thus still give the right result
325 while converging towards full recovery ... assuming no incorrectly matching
325 while converging towards full recovery ... assuming no incorrectly matching
326 node hashes.
326 node hashes.
327 The record also contains 4 bytes where 31 bits contains the index of the
327 The record also contains 4 bytes where 31 bits contains the index of the
328 branch and the last bit indicate that it is a branch close commit.
328 branch and the last bit indicate that it is a branch close commit.
329 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
329 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
330 and will grow with it but be 1/8th of its size.
330 and will grow with it but be 1/8th of its size.
331 """
331 """
332
332
333 def __init__(self, repo):
333 def __init__(self, repo, readonly=True):
334 assert repo.filtername is None
334 assert repo.filtername is None
335 self._names = [] # branch names in local encoding with static index
335 self._names = [] # branch names in local encoding with static index
336 self._rbcrevs = array('c') # structs of type _rbcrecfmt
336 self._rbcrevs = array('c') # structs of type _rbcrecfmt
337 self._rbcsnameslen = 0
337 self._rbcsnameslen = 0
338 try:
338 try:
339 bndata = repo.vfs.read(_rbcnames)
339 bndata = repo.vfs.read(_rbcnames)
340 self._rbcsnameslen = len(bndata) # for verification before writing
340 self._rbcsnameslen = len(bndata) # for verification before writing
341 self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
341 self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
342 except (IOError, OSError), inst:
342 except (IOError, OSError), inst:
343 repo.ui.debug("couldn't read revision branch cache names: %s\n" %
343 repo.ui.debug("couldn't read revision branch cache names: %s\n" %
344 inst)
344 inst)
345 if readonly:
346 # don't try to use cache - fall back to the slow path
347 self.branchinfo = self._branchinfo
348
345 if self._names:
349 if self._names:
346 try:
350 try:
347 data = repo.vfs.read(_rbcrevs)
351 data = repo.vfs.read(_rbcrevs)
348 self._rbcrevs.fromstring(data)
352 self._rbcrevs.fromstring(data)
349 except (IOError, OSError), inst:
353 except (IOError, OSError), inst:
350 repo.ui.debug("couldn't read revision branch cache: %s\n" %
354 repo.ui.debug("couldn't read revision branch cache: %s\n" %
351 inst)
355 inst)
352 # remember number of good records on disk
356 # remember number of good records on disk
353 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
357 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
354 len(repo.changelog))
358 len(repo.changelog))
355 if self._rbcrevslen == 0:
359 if self._rbcrevslen == 0:
356 self._names = []
360 self._names = []
357 self._rbcnamescount = len(self._names) # number of good names on disk
361 self._rbcnamescount = len(self._names) # number of good names on disk
358 self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
362 self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
359
363
360 def branchinfo(self, changelog, rev):
364 def branchinfo(self, changelog, rev):
361 """Return branch name and close flag for rev, using and updating
365 """Return branch name and close flag for rev, using and updating
362 persistent cache."""
366 persistent cache."""
363 rbcrevidx = rev * _rbcrecsize
367 rbcrevidx = rev * _rbcrecsize
364
368
365 # if requested rev is missing, add and populate all missing revs
369 # if requested rev is missing, add and populate all missing revs
366 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
370 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
367 first = len(self._rbcrevs) // _rbcrecsize
371 first = len(self._rbcrevs) // _rbcrecsize
368 self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
372 self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
369 len(self._rbcrevs)))
373 len(self._rbcrevs)))
370 for r in xrange(first, len(changelog)):
374 for r in xrange(first, len(changelog)):
371 self._branchinfo(changelog, r)
375 self._branchinfo(changelog, r)
372
376
373 # fast path: extract data from cache, use it if node is matching
377 # fast path: extract data from cache, use it if node is matching
374 reponode = changelog.node(rev)[:_rbcnodelen]
378 reponode = changelog.node(rev)[:_rbcnodelen]
375 cachenode, branchidx = unpack(
379 cachenode, branchidx = unpack(
376 _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
380 _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
377 close = bool(branchidx & _rbccloseflag)
381 close = bool(branchidx & _rbccloseflag)
378 if close:
382 if close:
379 branchidx &= _rbcbranchidxmask
383 branchidx &= _rbcbranchidxmask
380 if cachenode == reponode:
384 if cachenode == reponode:
381 return self._names[branchidx], close
385 return self._names[branchidx], close
382 # fall back to slow path and make sure it will be written to disk
386 # fall back to slow path and make sure it will be written to disk
383 self._rbcrevslen = min(self._rbcrevslen, rev)
387 self._rbcrevslen = min(self._rbcrevslen, rev)
384 return self._branchinfo(changelog, rev)
388 return self._branchinfo(changelog, rev)
385
389
386 def _branchinfo(self, changelog, rev):
390 def _branchinfo(self, changelog, rev):
387 """Retrieve branch info from changelog and update _rbcrevs"""
391 """Retrieve branch info from changelog and update _rbcrevs"""
388 b, close = changelog.branchinfo(rev)
392 b, close = changelog.branchinfo(rev)
389 if b in self._namesreverse:
393 if b in self._namesreverse:
390 branchidx = self._namesreverse[b]
394 branchidx = self._namesreverse[b]
391 else:
395 else:
392 branchidx = len(self._names)
396 branchidx = len(self._names)
393 self._names.append(b)
397 self._names.append(b)
394 self._namesreverse[b] = branchidx
398 self._namesreverse[b] = branchidx
395 reponode = changelog.node(rev)
399 reponode = changelog.node(rev)
396 if close:
400 if close:
397 branchidx |= _rbccloseflag
401 branchidx |= _rbccloseflag
398 rbcrevidx = rev * _rbcrecsize
402 rbcrevidx = rev * _rbcrecsize
399 rec = array('c')
403 rec = array('c')
400 rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
404 rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
401 self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
405 self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
402 return b, close
406 return b, close
403
407
404 def write(self, repo):
408 def write(self, repo):
405 """Save branch cache if it is dirty."""
409 """Save branch cache if it is dirty."""
406 if self._rbcnamescount < len(self._names):
410 if self._rbcnamescount < len(self._names):
407 try:
411 try:
408 if self._rbcnamescount != 0:
412 if self._rbcnamescount != 0:
409 f = repo.vfs.open(_rbcnames, 'ab')
413 f = repo.vfs.open(_rbcnames, 'ab')
410 # The position after open(x, 'a') is implementation defined-
414 # The position after open(x, 'a') is implementation defined-
411 # see issue3543. SEEK_END was added in 2.5
415 # see issue3543. SEEK_END was added in 2.5
412 f.seek(0, 2) #os.SEEK_END
416 f.seek(0, 2) #os.SEEK_END
413 if f.tell() == self._rbcsnameslen:
417 if f.tell() == self._rbcsnameslen:
414 f.write('\0')
418 f.write('\0')
415 else:
419 else:
416 f.close()
420 f.close()
417 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
421 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
418 self._rbcnamescount = 0
422 self._rbcnamescount = 0
419 self._rbcrevslen = 0
423 self._rbcrevslen = 0
420 if self._rbcnamescount == 0:
424 if self._rbcnamescount == 0:
421 f = repo.vfs.open(_rbcnames, 'wb')
425 f = repo.vfs.open(_rbcnames, 'wb')
422 f.write('\0'.join(encoding.fromlocal(b)
426 f.write('\0'.join(encoding.fromlocal(b)
423 for b in self._names[self._rbcnamescount:]))
427 for b in self._names[self._rbcnamescount:]))
424 self._rbcsnameslen = f.tell()
428 self._rbcsnameslen = f.tell()
425 f.close()
429 f.close()
426 except (IOError, OSError, util.Abort), inst:
430 except (IOError, OSError, util.Abort), inst:
427 repo.ui.debug("couldn't write revision branch cache names: "
431 repo.ui.debug("couldn't write revision branch cache names: "
428 "%s\n" % inst)
432 "%s\n" % inst)
429 return
433 return
430 self._rbcnamescount = len(self._names)
434 self._rbcnamescount = len(self._names)
431
435
432 start = self._rbcrevslen * _rbcrecsize
436 start = self._rbcrevslen * _rbcrecsize
433 if start != len(self._rbcrevs):
437 if start != len(self._rbcrevs):
434 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
438 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
435 try:
439 try:
436 f = repo.vfs.open(_rbcrevs, 'ab')
440 f = repo.vfs.open(_rbcrevs, 'ab')
437 # The position after open(x, 'a') is implementation defined-
441 # The position after open(x, 'a') is implementation defined-
438 # see issue3543. SEEK_END was added in 2.5
442 # see issue3543. SEEK_END was added in 2.5
439 f.seek(0, 2) #os.SEEK_END
443 f.seek(0, 2) #os.SEEK_END
440 if f.tell() != start:
444 if f.tell() != start:
441 repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
445 repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
442 f.seek(start)
446 f.seek(start)
443 f.truncate()
447 f.truncate()
444 end = revs * _rbcrecsize
448 end = revs * _rbcrecsize
445 f.write(self._rbcrevs[start:end])
449 f.write(self._rbcrevs[start:end])
446 f.close()
450 f.close()
447 except (IOError, OSError, util.Abort), inst:
451 except (IOError, OSError, util.Abort), inst:
448 repo.ui.debug("couldn't write revision branch cache: %s\n" %
452 repo.ui.debug("couldn't write revision branch cache: %s\n" %
449 inst)
453 inst)
450 return
454 return
451 self._rbcrevslen = revs
455 self._rbcrevslen = revs
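
As a side note, the fixed-size rbc-revs record layout described in the revbranchcache docstring above ('>4sI': a 4-byte node hash prefix followed by a big-endian 32-bit value whose high bit flags a branch-closing commit and whose low 31 bits index into rbc-names) can be exercised directly with the struct module. This is a standalone illustration with made-up helper names, not code from the Mercurial tree:

from struct import calcsize, pack, unpack

RBCRECFMT = '>4sI'            # 4-byte node prefix + big-endian 32-bit value
RBCCLOSEFLAG = 0x80000000     # high bit: changeset closes its branch
RBCIDXMASK = 0x7fffffff       # remaining 31 bits: index into rbc-names

def packrec(nodeprefix, branchidx, closes):
    # build one rbc-revs record from its three logical fields
    value = branchidx | (RBCCLOSEFLAG if closes else 0)
    return pack(RBCRECFMT, nodeprefix, value)

def unpackrec(rec):
    # split a record back into node prefix, branch index and close flag
    nodeprefix, value = unpack(RBCRECFMT, rec)
    return nodeprefix, value & RBCIDXMASK, bool(value & RBCCLOSEFLAG)

rec = packrec(b'\xde\xad\xbe\xef', 3, closes=True)
assert calcsize(RBCRECFMT) == len(rec) == 8
assert unpackrec(rec) == (b'\xde\xad\xbe\xef', 3, True)
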
@@ -1,3292 +1,3292 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import parser, util, error, discovery, hbisect, phases
9 import parser, util, error, discovery, hbisect, phases
10 import node
10 import node
11 import heapq
11 import heapq
12 import match as matchmod
12 import match as matchmod
13 from i18n import _
13 from i18n import _
14 import encoding
14 import encoding
15 import obsolete as obsmod
15 import obsolete as obsmod
16 import pathutil
16 import pathutil
17 import repoview
17 import repoview
18
18
19 def _revancestors(repo, revs, followfirst):
19 def _revancestors(repo, revs, followfirst):
20 """Like revlog.ancestors(), but supports followfirst."""
20 """Like revlog.ancestors(), but supports followfirst."""
21 cut = followfirst and 1 or None
21 cut = followfirst and 1 or None
22 cl = repo.changelog
22 cl = repo.changelog
23
23
24 def iterate():
24 def iterate():
25 revqueue, revsnode = None, None
25 revqueue, revsnode = None, None
26 h = []
26 h = []
27
27
28 revs.sort(reverse=True)
28 revs.sort(reverse=True)
29 revqueue = util.deque(revs)
29 revqueue = util.deque(revs)
30 if revqueue:
30 if revqueue:
31 revsnode = revqueue.popleft()
31 revsnode = revqueue.popleft()
32 heapq.heappush(h, -revsnode)
32 heapq.heappush(h, -revsnode)
33
33
34 seen = set()
34 seen = set()
35 while h:
35 while h:
36 current = -heapq.heappop(h)
36 current = -heapq.heappop(h)
37 if current not in seen:
37 if current not in seen:
38 if revsnode and current == revsnode:
38 if revsnode and current == revsnode:
39 if revqueue:
39 if revqueue:
40 revsnode = revqueue.popleft()
40 revsnode = revqueue.popleft()
41 heapq.heappush(h, -revsnode)
41 heapq.heappush(h, -revsnode)
42 seen.add(current)
42 seen.add(current)
43 yield current
43 yield current
44 for parent in cl.parentrevs(current)[:cut]:
44 for parent in cl.parentrevs(current)[:cut]:
45 if parent != node.nullrev:
45 if parent != node.nullrev:
46 heapq.heappush(h, -parent)
46 heapq.heappush(h, -parent)
47
47
48 return generatorset(iterate(), iterasc=False)
48 return generatorset(iterate(), iterasc=False)
49
49
50 def _revdescendants(repo, revs, followfirst):
50 def _revdescendants(repo, revs, followfirst):
51 """Like revlog.descendants() but supports followfirst."""
51 """Like revlog.descendants() but supports followfirst."""
52 cut = followfirst and 1 or None
52 cut = followfirst and 1 or None
53
53
54 def iterate():
54 def iterate():
55 cl = repo.changelog
55 cl = repo.changelog
56 first = min(revs)
56 first = min(revs)
57 nullrev = node.nullrev
57 nullrev = node.nullrev
58 if first == nullrev:
58 if first == nullrev:
59 # Are there nodes with a null first parent and a non-null
59 # Are there nodes with a null first parent and a non-null
60 # second one? Maybe. Do we care? Probably not.
60 # second one? Maybe. Do we care? Probably not.
61 for i in cl:
61 for i in cl:
62 yield i
62 yield i
63 else:
63 else:
64 seen = set(revs)
64 seen = set(revs)
65 for i in cl.revs(first + 1):
65 for i in cl.revs(first + 1):
66 for x in cl.parentrevs(i)[:cut]:
66 for x in cl.parentrevs(i)[:cut]:
67 if x != nullrev and x in seen:
67 if x != nullrev and x in seen:
68 seen.add(i)
68 seen.add(i)
69 yield i
69 yield i
70 break
70 break
71
71
72 return generatorset(iterate(), iterasc=True)
72 return generatorset(iterate(), iterasc=True)
73
73
74 def _revsbetween(repo, roots, heads):
74 def _revsbetween(repo, roots, heads):
75 """Return all paths between roots and heads, inclusive of both endpoint
75 """Return all paths between roots and heads, inclusive of both endpoint
76 sets."""
76 sets."""
77 if not roots:
77 if not roots:
78 return baseset()
78 return baseset()
79 parentrevs = repo.changelog.parentrevs
79 parentrevs = repo.changelog.parentrevs
80 visit = list(heads)
80 visit = list(heads)
81 reachable = set()
81 reachable = set()
82 seen = {}
82 seen = {}
83 minroot = min(roots)
83 minroot = min(roots)
84 roots = set(roots)
84 roots = set(roots)
85 # open-code the post-order traversal due to the tiny size of
85 # open-code the post-order traversal due to the tiny size of
86 # sys.getrecursionlimit()
86 # sys.getrecursionlimit()
87 while visit:
87 while visit:
88 rev = visit.pop()
88 rev = visit.pop()
89 if rev in roots:
89 if rev in roots:
90 reachable.add(rev)
90 reachable.add(rev)
91 parents = parentrevs(rev)
91 parents = parentrevs(rev)
92 seen[rev] = parents
92 seen[rev] = parents
93 for parent in parents:
93 for parent in parents:
94 if parent >= minroot and parent not in seen:
94 if parent >= minroot and parent not in seen:
95 visit.append(parent)
95 visit.append(parent)
96 if not reachable:
96 if not reachable:
97 return baseset()
97 return baseset()
98 for rev in sorted(seen):
98 for rev in sorted(seen):
99 for parent in seen[rev]:
99 for parent in seen[rev]:
100 if parent in reachable:
100 if parent in reachable:
101 reachable.add(rev)
101 reachable.add(rev)
102 return baseset(sorted(reachable))
102 return baseset(sorted(reachable))
103
103
104 elements = {
104 elements = {
105 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
105 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
106 "##": (20, None, ("_concat", 20)),
106 "##": (20, None, ("_concat", 20)),
107 "~": (18, None, ("ancestor", 18)),
107 "~": (18, None, ("ancestor", 18)),
108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 "-": (5, ("negate", 19), ("minus", 5)),
109 "-": (5, ("negate", 19), ("minus", 5)),
110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 ("dagrangepost", 17)),
111 ("dagrangepost", 17)),
112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 ("dagrangepost", 17)),
113 ("dagrangepost", 17)),
114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 "not": (10, ("not", 10)),
115 "not": (10, ("not", 10)),
116 "!": (10, ("not", 10)),
116 "!": (10, ("not", 10)),
117 "and": (5, None, ("and", 5)),
117 "and": (5, None, ("and", 5)),
118 "&": (5, None, ("and", 5)),
118 "&": (5, None, ("and", 5)),
119 "%": (5, None, ("only", 5), ("onlypost", 5)),
119 "%": (5, None, ("only", 5), ("onlypost", 5)),
120 "or": (4, None, ("or", 4)),
120 "or": (4, None, ("or", 4)),
121 "|": (4, None, ("or", 4)),
121 "|": (4, None, ("or", 4)),
122 "+": (4, None, ("or", 4)),
122 "+": (4, None, ("or", 4)),
123 ",": (2, None, ("list", 2)),
123 ",": (2, None, ("list", 2)),
124 ")": (0, None, None),
124 ")": (0, None, None),
125 "symbol": (0, ("symbol",), None),
125 "symbol": (0, ("symbol",), None),
126 "string": (0, ("string",), None),
126 "string": (0, ("string",), None),
127 "end": (0, None, None),
127 "end": (0, None, None),
128 }
128 }
129
129
130 keywords = set(['and', 'or', 'not'])
130 keywords = set(['and', 'or', 'not'])
131
131
132 # default set of valid characters for the initial letter of symbols
132 # default set of valid characters for the initial letter of symbols
133 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
133 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
134 if c.isalnum() or c in '._@' or ord(c) > 127)
134 if c.isalnum() or c in '._@' or ord(c) > 127)
135
135
136 # default set of valid characters for non-initial letters of symbols
136 # default set of valid characters for non-initial letters of symbols
137 _symletters = set(c for c in [chr(i) for i in xrange(256)]
137 _symletters = set(c for c in [chr(i) for i in xrange(256)]
138 if c.isalnum() or c in '-._/@' or ord(c) > 127)
138 if c.isalnum() or c in '-._/@' or ord(c) > 127)
139
139
140 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
140 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
141 '''
141 '''
142 Parse a revset statement into a stream of tokens
142 Parse a revset statement into a stream of tokens
143
143
144 ``syminitletters`` is the set of valid characters for the initial
144 ``syminitletters`` is the set of valid characters for the initial
145 letter of symbols.
145 letter of symbols.
146
146
147 By default, character ``c`` is recognized as valid for initial
147 By default, character ``c`` is recognized as valid for initial
148 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
148 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
149
149
150 ``symletters`` is the set of valid characters for non-initial
150 ``symletters`` is the set of valid characters for non-initial
151 letters of symbols.
151 letters of symbols.
152
152
153 By default, character ``c`` is recognized as valid for non-initial
153 By default, character ``c`` is recognized as valid for non-initial
154 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
154 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
155
155
156 Check that @ is a valid unquoted token character (issue3686):
156 Check that @ is a valid unquoted token character (issue3686):
157 >>> list(tokenize("@::"))
157 >>> list(tokenize("@::"))
158 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
158 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
159
159
160 '''
160 '''
161 if syminitletters is None:
161 if syminitletters is None:
162 syminitletters = _syminitletters
162 syminitletters = _syminitletters
163 if symletters is None:
163 if symletters is None:
164 symletters = _symletters
164 symletters = _symletters
165
165
166 pos, l = 0, len(program)
166 pos, l = 0, len(program)
167 while pos < l:
167 while pos < l:
168 c = program[pos]
168 c = program[pos]
169 if c.isspace(): # skip inter-token whitespace
169 if c.isspace(): # skip inter-token whitespace
170 pass
170 pass
171 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
171 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
172 yield ('::', None, pos)
172 yield ('::', None, pos)
173 pos += 1 # skip ahead
173 pos += 1 # skip ahead
174 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
174 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
175 yield ('..', None, pos)
175 yield ('..', None, pos)
176 pos += 1 # skip ahead
176 pos += 1 # skip ahead
177 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
177 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
178 yield ('##', None, pos)
178 yield ('##', None, pos)
179 pos += 1 # skip ahead
179 pos += 1 # skip ahead
180 elif c in "():,-|&+!~^%": # handle simple operators
180 elif c in "():,-|&+!~^%": # handle simple operators
181 yield (c, None, pos)
181 yield (c, None, pos)
182 elif (c in '"\'' or c == 'r' and
182 elif (c in '"\'' or c == 'r' and
183 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
183 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
184 if c == 'r':
184 if c == 'r':
185 pos += 1
185 pos += 1
186 c = program[pos]
186 c = program[pos]
187 decode = lambda x: x
187 decode = lambda x: x
188 else:
188 else:
189 decode = lambda x: x.decode('string-escape')
189 decode = lambda x: x.decode('string-escape')
190 pos += 1
190 pos += 1
191 s = pos
191 s = pos
192 while pos < l: # find closing quote
192 while pos < l: # find closing quote
193 d = program[pos]
193 d = program[pos]
194 if d == '\\': # skip over escaped characters
194 if d == '\\': # skip over escaped characters
195 pos += 2
195 pos += 2
196 continue
196 continue
197 if d == c:
197 if d == c:
198 yield ('string', decode(program[s:pos]), s)
198 yield ('string', decode(program[s:pos]), s)
199 break
199 break
200 pos += 1
200 pos += 1
201 else:
201 else:
202 raise error.ParseError(_("unterminated string"), s)
202 raise error.ParseError(_("unterminated string"), s)
203 # gather up a symbol/keyword
203 # gather up a symbol/keyword
204 elif c in syminitletters:
204 elif c in syminitletters:
205 s = pos
205 s = pos
206 pos += 1
206 pos += 1
207 while pos < l: # find end of symbol
207 while pos < l: # find end of symbol
208 d = program[pos]
208 d = program[pos]
209 if d not in symletters:
209 if d not in symletters:
210 break
210 break
211 if d == '.' and program[pos - 1] == '.': # special case for ..
211 if d == '.' and program[pos - 1] == '.': # special case for ..
212 pos -= 1
212 pos -= 1
213 break
213 break
214 pos += 1
214 pos += 1
215 sym = program[s:pos]
215 sym = program[s:pos]
216 if sym in keywords: # operator keywords
216 if sym in keywords: # operator keywords
217 yield (sym, None, s)
217 yield (sym, None, s)
218 elif '-' in sym:
218 elif '-' in sym:
219 # some jerk gave us foo-bar-baz, try to check if it's a symbol
219 # some jerk gave us foo-bar-baz, try to check if it's a symbol
220 if lookup and lookup(sym):
220 if lookup and lookup(sym):
221 # looks like a real symbol
221 # looks like a real symbol
222 yield ('symbol', sym, s)
222 yield ('symbol', sym, s)
223 else:
223 else:
224 # looks like an expression
224 # looks like an expression
225 parts = sym.split('-')
225 parts = sym.split('-')
226 for p in parts[:-1]:
226 for p in parts[:-1]:
227 if p: # possible consecutive -
227 if p: # possible consecutive -
228 yield ('symbol', p, s)
228 yield ('symbol', p, s)
229 s += len(p)
229 s += len(p)
230 yield ('-', None, pos)
230 yield ('-', None, pos)
231 s += 1
231 s += 1
232 if parts[-1]: # possible trailing -
232 if parts[-1]: # possible trailing -
233 yield ('symbol', parts[-1], s)
233 yield ('symbol', parts[-1], s)
234 else:
234 else:
235 yield ('symbol', sym, s)
235 yield ('symbol', sym, s)
236 pos -= 1
236 pos -= 1
237 else:
237 else:
238 raise error.ParseError(_("syntax error"), pos)
238 raise error.ParseError(_("syntax error"), pos)
239 pos += 1
239 pos += 1
240 yield ('end', None, pos)
240 yield ('end', None, pos)
241
241
242 def parseerrordetail(inst):
242 def parseerrordetail(inst):
243 """Compose error message from specified ParseError object
243 """Compose error message from specified ParseError object
244 """
244 """
245 if len(inst.args) > 1:
245 if len(inst.args) > 1:
246 return _('at %s: %s') % (inst.args[1], inst.args[0])
246 return _('at %s: %s') % (inst.args[1], inst.args[0])
247 else:
247 else:
248 return inst.args[0]
248 return inst.args[0]
249
249
250 # helpers
250 # helpers
251
251
252 def getstring(x, err):
252 def getstring(x, err):
253 if x and (x[0] == 'string' or x[0] == 'symbol'):
253 if x and (x[0] == 'string' or x[0] == 'symbol'):
254 return x[1]
254 return x[1]
255 raise error.ParseError(err)
255 raise error.ParseError(err)
256
256
257 def getlist(x):
257 def getlist(x):
258 if not x:
258 if not x:
259 return []
259 return []
260 if x[0] == 'list':
260 if x[0] == 'list':
261 return getlist(x[1]) + [x[2]]
261 return getlist(x[1]) + [x[2]]
262 return [x]
262 return [x]
263
263
264 def getargs(x, min, max, err):
264 def getargs(x, min, max, err):
265 l = getlist(x)
265 l = getlist(x)
266 if len(l) < min or (max >= 0 and len(l) > max):
266 if len(l) < min or (max >= 0 and len(l) > max):
267 raise error.ParseError(err)
267 raise error.ParseError(err)
268 return l
268 return l
269
269
270 def isvalidsymbol(tree):
270 def isvalidsymbol(tree):
271 """Examine whether specified ``tree`` is valid ``symbol`` or not
271 """Examine whether specified ``tree`` is valid ``symbol`` or not
272 """
272 """
273 return tree[0] == 'symbol' and len(tree) > 1
273 return tree[0] == 'symbol' and len(tree) > 1
274
274
275 def getsymbol(tree):
275 def getsymbol(tree):
276 """Get symbol name from valid ``symbol`` in ``tree``
276 """Get symbol name from valid ``symbol`` in ``tree``
277
277
278 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
278 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
279 """
279 """
280 return tree[1]
280 return tree[1]
281
281
282 def isvalidfunc(tree):
282 def isvalidfunc(tree):
283 """Examine whether specified ``tree`` is valid ``func`` or not
283 """Examine whether specified ``tree`` is valid ``func`` or not
284 """
284 """
285 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
285 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
286
286
287 def getfuncname(tree):
287 def getfuncname(tree):
288 """Get function name from valid ``func`` in ``tree``
288 """Get function name from valid ``func`` in ``tree``
289
289
290 This assumes that ``tree`` is already examined by ``isvalidfunc``.
290 This assumes that ``tree`` is already examined by ``isvalidfunc``.
291 """
291 """
292 return getsymbol(tree[1])
292 return getsymbol(tree[1])
293
293
294 def getfuncargs(tree):
294 def getfuncargs(tree):
295 """Get list of function arguments from valid ``func`` in ``tree``
295 """Get list of function arguments from valid ``func`` in ``tree``
296
296
297 This assumes that ``tree`` is already examined by ``isvalidfunc``.
297 This assumes that ``tree`` is already examined by ``isvalidfunc``.
298 """
298 """
299 if len(tree) > 2:
299 if len(tree) > 2:
300 return getlist(tree[2])
300 return getlist(tree[2])
301 else:
301 else:
302 return []
302 return []
303
303
304 def getset(repo, subset, x):
304 def getset(repo, subset, x):
305 if not x:
305 if not x:
306 raise error.ParseError(_("missing argument"))
306 raise error.ParseError(_("missing argument"))
307 s = methods[x[0]](repo, subset, *x[1:])
307 s = methods[x[0]](repo, subset, *x[1:])
308 if util.safehasattr(s, 'isascending'):
308 if util.safehasattr(s, 'isascending'):
309 return s
309 return s
310 return baseset(s)
310 return baseset(s)
311
311
312 def _getrevsource(repo, r):
312 def _getrevsource(repo, r):
313 extra = repo[r].extra()
313 extra = repo[r].extra()
314 for label in ('source', 'transplant_source', 'rebase_source'):
314 for label in ('source', 'transplant_source', 'rebase_source'):
315 if label in extra:
315 if label in extra:
316 try:
316 try:
317 return repo[extra[label]].rev()
317 return repo[extra[label]].rev()
318 except error.RepoLookupError:
318 except error.RepoLookupError:
319 pass
319 pass
320 return None
320 return None
321
321
322 # operator methods
322 # operator methods
323
323
324 def stringset(repo, subset, x):
324 def stringset(repo, subset, x):
325 x = repo[x].rev()
325 x = repo[x].rev()
326 if x == -1 and len(subset) == len(repo):
326 if x == -1 and len(subset) == len(repo):
327 return baseset([-1])
327 return baseset([-1])
328 if x in subset:
328 if x in subset:
329 return baseset([x])
329 return baseset([x])
330 return baseset()
330 return baseset()
331
331
332 def symbolset(repo, subset, x):
332 def symbolset(repo, subset, x):
333 if x in symbols:
333 if x in symbols:
334 raise error.ParseError(_("can't use %s here") % x)
334 raise error.ParseError(_("can't use %s here") % x)
335 return stringset(repo, subset, x)
335 return stringset(repo, subset, x)
336
336
337 def rangeset(repo, subset, x, y):
337 def rangeset(repo, subset, x, y):
338 m = getset(repo, fullreposet(repo), x)
338 m = getset(repo, fullreposet(repo), x)
339 n = getset(repo, fullreposet(repo), y)
339 n = getset(repo, fullreposet(repo), y)
340
340
341 if not m or not n:
341 if not m or not n:
342 return baseset()
342 return baseset()
343 m, n = m.first(), n.last()
343 m, n = m.first(), n.last()
344
344
345 if m < n:
345 if m < n:
346 r = spanset(repo, m, n + 1)
346 r = spanset(repo, m, n + 1)
347 else:
347 else:
348 r = spanset(repo, m, n - 1)
348 r = spanset(repo, m, n - 1)
349 return r & subset
349 return r & subset
350
350
351 def dagrange(repo, subset, x, y):
351 def dagrange(repo, subset, x, y):
352 r = spanset(repo)
352 r = spanset(repo)
353 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
353 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
354 return xs & subset
354 return xs & subset
355
355
356 def andset(repo, subset, x, y):
356 def andset(repo, subset, x, y):
357 return getset(repo, getset(repo, subset, x), y)
357 return getset(repo, getset(repo, subset, x), y)
358
358
359 def orset(repo, subset, x, y):
359 def orset(repo, subset, x, y):
360 xl = getset(repo, subset, x)
360 xl = getset(repo, subset, x)
361 yl = getset(repo, subset - xl, y)
361 yl = getset(repo, subset - xl, y)
362 return xl + yl
362 return xl + yl
363
363
364 def notset(repo, subset, x):
364 def notset(repo, subset, x):
365 return subset - getset(repo, subset, x)
365 return subset - getset(repo, subset, x)
366
366
367 def listset(repo, subset, a, b):
367 def listset(repo, subset, a, b):
368 raise error.ParseError(_("can't use a list in this context"))
368 raise error.ParseError(_("can't use a list in this context"))
369
369
370 def func(repo, subset, a, b):
370 def func(repo, subset, a, b):
371 if a[0] == 'symbol' and a[1] in symbols:
371 if a[0] == 'symbol' and a[1] in symbols:
372 return symbols[a[1]](repo, subset, b)
372 return symbols[a[1]](repo, subset, b)
373 raise error.ParseError(_("not a function: %s") % a[1])
373 raise error.ParseError(_("not a function: %s") % a[1])
374
374
375 # functions
375 # functions
376
376
377 def adds(repo, subset, x):
377 def adds(repo, subset, x):
378 """``adds(pattern)``
378 """``adds(pattern)``
379 Changesets that add a file matching pattern.
379 Changesets that add a file matching pattern.
380
380
381 The pattern without explicit kind like ``glob:`` is expected to be
381 The pattern without explicit kind like ``glob:`` is expected to be
382 relative to the current directory and match against a file or a
382 relative to the current directory and match against a file or a
383 directory.
383 directory.
384 """
384 """
385 # i18n: "adds" is a keyword
385 # i18n: "adds" is a keyword
386 pat = getstring(x, _("adds requires a pattern"))
386 pat = getstring(x, _("adds requires a pattern"))
387 return checkstatus(repo, subset, pat, 1)
387 return checkstatus(repo, subset, pat, 1)
388
388
389 def ancestor(repo, subset, x):
389 def ancestor(repo, subset, x):
390 """``ancestor(*changeset)``
390 """``ancestor(*changeset)``
391 A greatest common ancestor of the changesets.
391 A greatest common ancestor of the changesets.
392
392
393 Accepts 0 or more changesets.
393 Accepts 0 or more changesets.
394 Will return empty list when passed no args.
394 Will return empty list when passed no args.
395 Greatest common ancestor of a single changeset is that changeset.
395 Greatest common ancestor of a single changeset is that changeset.
396 """
396 """
397 # i18n: "ancestor" is a keyword
397 # i18n: "ancestor" is a keyword
398 l = getlist(x)
398 l = getlist(x)
399 rl = spanset(repo)
399 rl = spanset(repo)
400 anc = None
400 anc = None
401
401
402 # (getset(repo, rl, i) for i in l) generates a list of lists
402 # (getset(repo, rl, i) for i in l) generates a list of lists
403 for revs in (getset(repo, rl, i) for i in l):
403 for revs in (getset(repo, rl, i) for i in l):
404 for r in revs:
404 for r in revs:
405 if anc is None:
405 if anc is None:
406 anc = repo[r]
406 anc = repo[r]
407 else:
407 else:
408 anc = anc.ancestor(repo[r])
408 anc = anc.ancestor(repo[r])
409
409
410 if anc is not None and anc.rev() in subset:
410 if anc is not None and anc.rev() in subset:
411 return baseset([anc.rev()])
411 return baseset([anc.rev()])
412 return baseset()
412 return baseset()
413
413
414 def _ancestors(repo, subset, x, followfirst=False):
414 def _ancestors(repo, subset, x, followfirst=False):
415 heads = getset(repo, spanset(repo), x)
415 heads = getset(repo, spanset(repo), x)
416 if not heads:
416 if not heads:
417 return baseset()
417 return baseset()
418 s = _revancestors(repo, heads, followfirst)
418 s = _revancestors(repo, heads, followfirst)
419 return subset & s
419 return subset & s
420
420
421 def ancestors(repo, subset, x):
421 def ancestors(repo, subset, x):
422 """``ancestors(set)``
422 """``ancestors(set)``
423 Changesets that are ancestors of a changeset in set.
423 Changesets that are ancestors of a changeset in set.
424 """
424 """
425 return _ancestors(repo, subset, x)
425 return _ancestors(repo, subset, x)
426
426
427 def _firstancestors(repo, subset, x):
427 def _firstancestors(repo, subset, x):
428 # ``_firstancestors(set)``
428 # ``_firstancestors(set)``
429 # Like ``ancestors(set)`` but follows only the first parents.
429 # Like ``ancestors(set)`` but follows only the first parents.
430 return _ancestors(repo, subset, x, followfirst=True)
430 return _ancestors(repo, subset, x, followfirst=True)
431
431
432 def ancestorspec(repo, subset, x, n):
432 def ancestorspec(repo, subset, x, n):
433 """``set~n``
433 """``set~n``
434 Changesets that are the Nth ancestor (first parents only) of a changeset
434 Changesets that are the Nth ancestor (first parents only) of a changeset
435 in set.
435 in set.
436 """
436 """
437 try:
437 try:
438 n = int(n[1])
438 n = int(n[1])
439 except (TypeError, ValueError):
439 except (TypeError, ValueError):
440 raise error.ParseError(_("~ expects a number"))
440 raise error.ParseError(_("~ expects a number"))
441 ps = set()
441 ps = set()
442 cl = repo.changelog
442 cl = repo.changelog
443 for r in getset(repo, fullreposet(repo), x):
443 for r in getset(repo, fullreposet(repo), x):
444 for i in range(n):
444 for i in range(n):
445 r = cl.parentrevs(r)[0]
445 r = cl.parentrevs(r)[0]
446 ps.add(r)
446 ps.add(r)
447 return subset & ps
447 return subset & ps
448
448
449 def author(repo, subset, x):
449 def author(repo, subset, x):
450 """``author(string)``
450 """``author(string)``
451 Alias for ``user(string)``.
451 Alias for ``user(string)``.
452 """
452 """
453 # i18n: "author" is a keyword
453 # i18n: "author" is a keyword
454 n = encoding.lower(getstring(x, _("author requires a string")))
454 n = encoding.lower(getstring(x, _("author requires a string")))
455 kind, pattern, matcher = _substringmatcher(n)
455 kind, pattern, matcher = _substringmatcher(n)
456 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
456 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
457
457
458 def bisect(repo, subset, x):
458 def bisect(repo, subset, x):
459 """``bisect(string)``
459 """``bisect(string)``
460 Changesets marked in the specified bisect status:
460 Changesets marked in the specified bisect status:
461
461
462 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
462 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
463 - ``goods``, ``bads`` : csets topologically good/bad
463 - ``goods``, ``bads`` : csets topologically good/bad
464 - ``range`` : csets taking part in the bisection
464 - ``range`` : csets taking part in the bisection
465 - ``pruned`` : csets that are goods, bads or skipped
465 - ``pruned`` : csets that are goods, bads or skipped
466 - ``untested`` : csets whose fate is yet unknown
466 - ``untested`` : csets whose fate is yet unknown
467 - ``ignored`` : csets ignored due to DAG topology
467 - ``ignored`` : csets ignored due to DAG topology
468 - ``current`` : the cset currently being bisected
468 - ``current`` : the cset currently being bisected
469 """
469 """
470 # i18n: "bisect" is a keyword
470 # i18n: "bisect" is a keyword
471 status = getstring(x, _("bisect requires a string")).lower()
471 status = getstring(x, _("bisect requires a string")).lower()
472 state = set(hbisect.get(repo, status))
472 state = set(hbisect.get(repo, status))
473 return subset & state
473 return subset & state
474
474
475 # Backward-compatibility
475 # Backward-compatibility
476 # - no help entry so that we do not advertise it any more
476 # - no help entry so that we do not advertise it any more
477 def bisected(repo, subset, x):
477 def bisected(repo, subset, x):
478 return bisect(repo, subset, x)
478 return bisect(repo, subset, x)
479
479
480 def bookmark(repo, subset, x):
480 def bookmark(repo, subset, x):
481 """``bookmark([name])``
481 """``bookmark([name])``
482 The named bookmark or all bookmarks.
482 The named bookmark or all bookmarks.
483
483
484 If `name` starts with `re:`, the remainder of the name is treated as
484 If `name` starts with `re:`, the remainder of the name is treated as
485 a regular expression. To match a bookmark that actually starts with `re:`,
485 a regular expression. To match a bookmark that actually starts with `re:`,
486 use the prefix `literal:`.
486 use the prefix `literal:`.
487 """
487 """
488 # i18n: "bookmark" is a keyword
488 # i18n: "bookmark" is a keyword
489 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
489 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
490 if args:
490 if args:
491 bm = getstring(args[0],
491 bm = getstring(args[0],
492 # i18n: "bookmark" is a keyword
492 # i18n: "bookmark" is a keyword
493 _('the argument to bookmark must be a string'))
493 _('the argument to bookmark must be a string'))
494 kind, pattern, matcher = _stringmatcher(bm)
494 kind, pattern, matcher = _stringmatcher(bm)
495 bms = set()
495 bms = set()
496 if kind == 'literal':
496 if kind == 'literal':
497 bmrev = repo._bookmarks.get(pattern, None)
497 bmrev = repo._bookmarks.get(pattern, None)
498 if not bmrev:
498 if not bmrev:
499 raise error.RepoLookupError(_("bookmark '%s' does not exist")
499 raise error.RepoLookupError(_("bookmark '%s' does not exist")
500 % bm)
500 % bm)
501 bms.add(repo[bmrev].rev())
501 bms.add(repo[bmrev].rev())
502 else:
502 else:
503 matchrevs = set()
503 matchrevs = set()
504 for name, bmrev in repo._bookmarks.iteritems():
504 for name, bmrev in repo._bookmarks.iteritems():
505 if matcher(name):
505 if matcher(name):
506 matchrevs.add(bmrev)
506 matchrevs.add(bmrev)
507 if not matchrevs:
507 if not matchrevs:
508 raise error.RepoLookupError(_("no bookmarks exist"
508 raise error.RepoLookupError(_("no bookmarks exist"
509 " that match '%s'") % pattern)
509 " that match '%s'") % pattern)
510 for bmrev in matchrevs:
510 for bmrev in matchrevs:
511 bms.add(repo[bmrev].rev())
511 bms.add(repo[bmrev].rev())
512 else:
512 else:
513 bms = set([repo[r].rev()
513 bms = set([repo[r].rev()
514 for r in repo._bookmarks.values()])
514 for r in repo._bookmarks.values()])
515 bms -= set([node.nullrev])
515 bms -= set([node.nullrev])
516 return subset & bms
516 return subset & bms
517
517
518 def branch(repo, subset, x):
518 def branch(repo, subset, x):
519 """``branch(string or set)``
519 """``branch(string or set)``
520 All changesets belonging to the given branch or the branches of the given
520 All changesets belonging to the given branch or the branches of the given
521 changesets.
521 changesets.
522
522
523 If `string` starts with `re:`, the remainder of the name is treated as
523 If `string` starts with `re:`, the remainder of the name is treated as
524 a regular expression. To match a branch that actually starts with `re:`,
524 a regular expression. To match a branch that actually starts with `re:`,
525 use the prefix `literal:`.
525 use the prefix `literal:`.
526 """
526 """
527 import branchmap
527 import branchmap
528 urepo = repo.unfiltered()
528 urepo = repo.unfiltered()
529 ucl = urepo.changelog
529 ucl = urepo.changelog
530 getbi = branchmap.revbranchcache(urepo).branchinfo
530 getbi = branchmap.revbranchcache(urepo, readonly=True).branchinfo
531
531
532 try:
532 try:
533 b = getstring(x, '')
533 b = getstring(x, '')
534 except error.ParseError:
534 except error.ParseError:
535 # not a string, but another revspec, e.g. tip()
535 # not a string, but another revspec, e.g. tip()
536 pass
536 pass
537 else:
537 else:
538 kind, pattern, matcher = _stringmatcher(b)
538 kind, pattern, matcher = _stringmatcher(b)
539 if kind == 'literal':
539 if kind == 'literal':
540 # note: falls through to the revspec case if no branch with
540 # note: falls through to the revspec case if no branch with
541 # this name exists
541 # this name exists
542 if pattern in repo.branchmap():
542 if pattern in repo.branchmap():
543 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
543 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
544 else:
544 else:
545 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
545 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
546
546
547 s = getset(repo, spanset(repo), x)
547 s = getset(repo, spanset(repo), x)
548 b = set()
548 b = set()
549 for r in s:
549 for r in s:
550 b.add(getbi(ucl, r)[0])
550 b.add(getbi(ucl, r)[0])
551 c = s.__contains__
551 c = s.__contains__
552 return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
552 return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
553
553
554 def bumped(repo, subset, x):
554 def bumped(repo, subset, x):
555 """``bumped()``
555 """``bumped()``
556 Mutable changesets marked as successors of public changesets.
556 Mutable changesets marked as successors of public changesets.
557
557
558 Only non-public and non-obsolete changesets can be `bumped`.
558 Only non-public and non-obsolete changesets can be `bumped`.
559 """
559 """
560 # i18n: "bumped" is a keyword
560 # i18n: "bumped" is a keyword
561 getargs(x, 0, 0, _("bumped takes no arguments"))
561 getargs(x, 0, 0, _("bumped takes no arguments"))
562 bumped = obsmod.getrevs(repo, 'bumped')
562 bumped = obsmod.getrevs(repo, 'bumped')
563 return subset & bumped
563 return subset & bumped
564
564
565 def bundle(repo, subset, x):
565 def bundle(repo, subset, x):
566 """``bundle()``
566 """``bundle()``
567 Changesets in the bundle.
567 Changesets in the bundle.
568
568
569 Bundle must be specified by the -R option."""
569 Bundle must be specified by the -R option."""
570
570
571 try:
571 try:
572 bundlerevs = repo.changelog.bundlerevs
572 bundlerevs = repo.changelog.bundlerevs
573 except AttributeError:
573 except AttributeError:
574 raise util.Abort(_("no bundle provided - specify with -R"))
574 raise util.Abort(_("no bundle provided - specify with -R"))
575 return subset & bundlerevs
575 return subset & bundlerevs
576
576
577 def checkstatus(repo, subset, pat, field):
577 def checkstatus(repo, subset, pat, field):
578 hasset = matchmod.patkind(pat) == 'set'
578 hasset = matchmod.patkind(pat) == 'set'
579
579
580 mcache = [None]
580 mcache = [None]
581 def matches(x):
581 def matches(x):
582 c = repo[x]
582 c = repo[x]
583 if not mcache[0] or hasset:
583 if not mcache[0] or hasset:
584 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
584 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
585 m = mcache[0]
585 m = mcache[0]
586 fname = None
586 fname = None
587 if not m.anypats() and len(m.files()) == 1:
587 if not m.anypats() and len(m.files()) == 1:
588 fname = m.files()[0]
588 fname = m.files()[0]
589 if fname is not None:
589 if fname is not None:
590 if fname not in c.files():
590 if fname not in c.files():
591 return False
591 return False
592 else:
592 else:
593 for f in c.files():
593 for f in c.files():
594 if m(f):
594 if m(f):
595 break
595 break
596 else:
596 else:
597 return False
597 return False
598 files = repo.status(c.p1().node(), c.node())[field]
598 files = repo.status(c.p1().node(), c.node())[field]
599 if fname is not None:
599 if fname is not None:
600 if fname in files:
600 if fname in files:
601 return True
601 return True
602 else:
602 else:
603 for f in files:
603 for f in files:
604 if m(f):
604 if m(f):
605 return True
605 return True
606
606
607 return subset.filter(matches)
607 return subset.filter(matches)
608
608
609 def _children(repo, narrow, parentset):
609 def _children(repo, narrow, parentset):
610 cs = set()
610 cs = set()
611 if not parentset:
611 if not parentset:
612 return baseset(cs)
612 return baseset(cs)
613 pr = repo.changelog.parentrevs
613 pr = repo.changelog.parentrevs
614 minrev = min(parentset)
614 minrev = min(parentset)
615 for r in narrow:
615 for r in narrow:
616 if r <= minrev:
616 if r <= minrev:
617 continue
617 continue
618 for p in pr(r):
618 for p in pr(r):
619 if p in parentset:
619 if p in parentset:
620 cs.add(r)
620 cs.add(r)
621 return baseset(cs)
621 return baseset(cs)
622
622
623 def children(repo, subset, x):
623 def children(repo, subset, x):
624 """``children(set)``
624 """``children(set)``
625 Child changesets of changesets in set.
625 Child changesets of changesets in set.
626 """
626 """
627 s = getset(repo, fullreposet(repo), x)
627 s = getset(repo, fullreposet(repo), x)
628 cs = _children(repo, subset, s)
628 cs = _children(repo, subset, s)
629 return subset & cs
629 return subset & cs
630
630
631 def closed(repo, subset, x):
631 def closed(repo, subset, x):
632 """``closed()``
632 """``closed()``
633 Changeset is closed.
633 Changeset is closed.
634 """
634 """
635 # i18n: "closed" is a keyword
635 # i18n: "closed" is a keyword
636 getargs(x, 0, 0, _("closed takes no arguments"))
636 getargs(x, 0, 0, _("closed takes no arguments"))
637 return subset.filter(lambda r: repo[r].closesbranch())
637 return subset.filter(lambda r: repo[r].closesbranch())
638
638
639 def contains(repo, subset, x):
639 def contains(repo, subset, x):
640 """``contains(pattern)``
640 """``contains(pattern)``
641 The revision's manifest contains a file matching pattern (but might not
641 The revision's manifest contains a file matching pattern (but might not
642 modify it). See :hg:`help patterns` for information about file patterns.
642 modify it). See :hg:`help patterns` for information about file patterns.
643
643
644 The pattern without explicit kind like ``glob:`` is expected to be
644 The pattern without explicit kind like ``glob:`` is expected to be
645 relative to the current directory and match against a file exactly
645 relative to the current directory and match against a file exactly
646 for efficiency.
646 for efficiency.
647 """
647 """
648 # i18n: "contains" is a keyword
648 # i18n: "contains" is a keyword
649 pat = getstring(x, _("contains requires a pattern"))
649 pat = getstring(x, _("contains requires a pattern"))
650
650
651 def matches(x):
651 def matches(x):
652 if not matchmod.patkind(pat):
652 if not matchmod.patkind(pat):
653 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
653 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
654 if pats in repo[x]:
654 if pats in repo[x]:
655 return True
655 return True
656 else:
656 else:
657 c = repo[x]
657 c = repo[x]
658 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
658 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
659 for f in c.manifest():
659 for f in c.manifest():
660 if m(f):
660 if m(f):
661 return True
661 return True
662 return False
662 return False
663
663
664 return subset.filter(matches)
664 return subset.filter(matches)
665
665
666 def converted(repo, subset, x):
666 def converted(repo, subset, x):
667 """``converted([id])``
667 """``converted([id])``
668 Changesets converted from the given identifier in the old repository if
668 Changesets converted from the given identifier in the old repository if
669 present, or all converted changesets if no identifier is specified.
669 present, or all converted changesets if no identifier is specified.
670 """
670 """
671
671
672 # There is exactly no chance of resolving the revision, so do a simple
672 # There is exactly no chance of resolving the revision, so do a simple
673 # string compare and hope for the best
673 # string compare and hope for the best
674
674
675 rev = None
675 rev = None
676 # i18n: "converted" is a keyword
676 # i18n: "converted" is a keyword
677 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
677 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
678 if l:
678 if l:
679 # i18n: "converted" is a keyword
679 # i18n: "converted" is a keyword
680 rev = getstring(l[0], _('converted requires a revision'))
680 rev = getstring(l[0], _('converted requires a revision'))
681
681
682 def _matchvalue(r):
682 def _matchvalue(r):
683 source = repo[r].extra().get('convert_revision', None)
683 source = repo[r].extra().get('convert_revision', None)
684 return source is not None and (rev is None or source.startswith(rev))
684 return source is not None and (rev is None or source.startswith(rev))
685
685
686 return subset.filter(lambda r: _matchvalue(r))
686 return subset.filter(lambda r: _matchvalue(r))
687
687
688 def date(repo, subset, x):
688 def date(repo, subset, x):
689 """``date(interval)``
689 """``date(interval)``
690 Changesets within the interval, see :hg:`help dates`.
690 Changesets within the interval, see :hg:`help dates`.
691 """
691 """
692 # i18n: "date" is a keyword
692 # i18n: "date" is a keyword
693 ds = getstring(x, _("date requires a string"))
693 ds = getstring(x, _("date requires a string"))
694 dm = util.matchdate(ds)
694 dm = util.matchdate(ds)
695 return subset.filter(lambda x: dm(repo[x].date()[0]))
695 return subset.filter(lambda x: dm(repo[x].date()[0]))
696
696
697 def desc(repo, subset, x):
697 def desc(repo, subset, x):
698 """``desc(string)``
698 """``desc(string)``
699 Search commit message for string. The match is case-insensitive.
699 Search commit message for string. The match is case-insensitive.
700 """
700 """
701 # i18n: "desc" is a keyword
701 # i18n: "desc" is a keyword
702 ds = encoding.lower(getstring(x, _("desc requires a string")))
702 ds = encoding.lower(getstring(x, _("desc requires a string")))
703
703
704 def matches(x):
704 def matches(x):
705 c = repo[x]
705 c = repo[x]
706 return ds in encoding.lower(c.description())
706 return ds in encoding.lower(c.description())
707
707
708 return subset.filter(matches)
708 return subset.filter(matches)
709
709
710 def _descendants(repo, subset, x, followfirst=False):
710 def _descendants(repo, subset, x, followfirst=False):
711 roots = getset(repo, spanset(repo), x)
711 roots = getset(repo, spanset(repo), x)
712 if not roots:
712 if not roots:
713 return baseset()
713 return baseset()
714 s = _revdescendants(repo, roots, followfirst)
714 s = _revdescendants(repo, roots, followfirst)
715
715
716 # Both sets need to be ascending in order to lazily return the union
716 # Both sets need to be ascending in order to lazily return the union
717 # in the correct order.
717 # in the correct order.
718 base = subset & roots
718 base = subset & roots
719 desc = subset & s
719 desc = subset & s
720 result = base + desc
720 result = base + desc
721 if subset.isascending():
721 if subset.isascending():
722 result.sort()
722 result.sort()
723 elif subset.isdescending():
723 elif subset.isdescending():
724 result.sort(reverse=True)
724 result.sort(reverse=True)
725 else:
725 else:
726 result = subset & result
726 result = subset & result
727 return result
727 return result
728
728
729 def descendants(repo, subset, x):
729 def descendants(repo, subset, x):
730 """``descendants(set)``
730 """``descendants(set)``
731 Changesets which are descendants of changesets in set.
731 Changesets which are descendants of changesets in set.
732 """
732 """
733 return _descendants(repo, subset, x)
733 return _descendants(repo, subset, x)
734
734
735 def _firstdescendants(repo, subset, x):
735 def _firstdescendants(repo, subset, x):
736 # ``_firstdescendants(set)``
736 # ``_firstdescendants(set)``
737 # Like ``descendants(set)`` but follows only the first parents.
737 # Like ``descendants(set)`` but follows only the first parents.
738 return _descendants(repo, subset, x, followfirst=True)
738 return _descendants(repo, subset, x, followfirst=True)
739
739
740 def destination(repo, subset, x):
740 def destination(repo, subset, x):
741 """``destination([set])``
741 """``destination([set])``
742 Changesets that were created by a graft, transplant or rebase operation,
742 Changesets that were created by a graft, transplant or rebase operation,
743 with the given revisions specified as the source. Omitting the optional set
743 with the given revisions specified as the source. Omitting the optional set
744 is the same as passing all().
744 is the same as passing all().
745 """
745 """
746 if x is not None:
746 if x is not None:
747 sources = getset(repo, spanset(repo), x)
747 sources = getset(repo, spanset(repo), x)
748 else:
748 else:
749 sources = getall(repo, spanset(repo), x)
749 sources = getall(repo, spanset(repo), x)
750
750
751 dests = set()
751 dests = set()
752
752
753 # subset contains all of the possible destinations that can be returned, so
753 # subset contains all of the possible destinations that can be returned, so
754 # iterate over them and see if their source(s) were provided in the arg set.
754 # iterate over them and see if their source(s) were provided in the arg set.
755 # Even if the immediate src of r is not in the arg set, src's source (or
755 # Even if the immediate src of r is not in the arg set, src's source (or
756 # further back) may be. Scanning back further than the immediate src allows
756 # further back) may be. Scanning back further than the immediate src allows
757 # transitive transplants and rebases to yield the same results as transitive
757 # transitive transplants and rebases to yield the same results as transitive
758 # grafts.
758 # grafts.
759 for r in subset:
759 for r in subset:
760 src = _getrevsource(repo, r)
760 src = _getrevsource(repo, r)
761 lineage = None
761 lineage = None
762
762
763 while src is not None:
763 while src is not None:
764 if lineage is None:
764 if lineage is None:
765 lineage = list()
765 lineage = list()
766
766
767 lineage.append(r)
767 lineage.append(r)
768
768
769 # The visited lineage is a match if the current source is in the arg
769 # The visited lineage is a match if the current source is in the arg
770 # set. Since every candidate dest is visited by way of iterating
770 # set. Since every candidate dest is visited by way of iterating
771 # subset, any dests further back in the lineage will be tested by a
771 # subset, any dests further back in the lineage will be tested by a
772 # different iteration over subset. Likewise, if the src was already
772 # different iteration over subset. Likewise, if the src was already
773 # selected, the current lineage can be selected without going back
773 # selected, the current lineage can be selected without going back
774 # further.
774 # further.
775 if src in sources or src in dests:
775 if src in sources or src in dests:
776 dests.update(lineage)
776 dests.update(lineage)
777 break
777 break
778
778
779 r = src
779 r = src
780 src = _getrevsource(repo, r)
780 src = _getrevsource(repo, r)
781
781
782 return subset.filter(dests.__contains__)
782 return subset.filter(dests.__contains__)
783
783
784 def divergent(repo, subset, x):
784 def divergent(repo, subset, x):
785 """``divergent()``
785 """``divergent()``
786 Final successors of changesets with an alternative set of final successors.
786 Final successors of changesets with an alternative set of final successors.
787 """
787 """
788 # i18n: "divergent" is a keyword
788 # i18n: "divergent" is a keyword
789 getargs(x, 0, 0, _("divergent takes no arguments"))
789 getargs(x, 0, 0, _("divergent takes no arguments"))
790 divergent = obsmod.getrevs(repo, 'divergent')
790 divergent = obsmod.getrevs(repo, 'divergent')
791 return subset & divergent
791 return subset & divergent
792
792
793 def draft(repo, subset, x):
793 def draft(repo, subset, x):
794 """``draft()``
794 """``draft()``
795 Changeset in draft phase."""
795 Changeset in draft phase."""
796 # i18n: "draft" is a keyword
796 # i18n: "draft" is a keyword
797 getargs(x, 0, 0, _("draft takes no arguments"))
797 getargs(x, 0, 0, _("draft takes no arguments"))
798 phase = repo._phasecache.phase
798 phase = repo._phasecache.phase
799 target = phases.draft
799 target = phases.draft
800 condition = lambda r: phase(repo, r) == target
800 condition = lambda r: phase(repo, r) == target
801 return subset.filter(condition, cache=False)
801 return subset.filter(condition, cache=False)
802
802
803 def extinct(repo, subset, x):
803 def extinct(repo, subset, x):
804 """``extinct()``
804 """``extinct()``
805 Obsolete changesets with obsolete descendants only.
805 Obsolete changesets with obsolete descendants only.
806 """
806 """
807 # i18n: "extinct" is a keyword
807 # i18n: "extinct" is a keyword
808 getargs(x, 0, 0, _("extinct takes no arguments"))
808 getargs(x, 0, 0, _("extinct takes no arguments"))
809 extincts = obsmod.getrevs(repo, 'extinct')
809 extincts = obsmod.getrevs(repo, 'extinct')
810 return subset & extincts
810 return subset & extincts
811
811
812 def extra(repo, subset, x):
812 def extra(repo, subset, x):
813 """``extra(label, [value])``
813 """``extra(label, [value])``
814 Changesets with the given label in the extra metadata, with the given
814 Changesets with the given label in the extra metadata, with the given
815 optional value.
815 optional value.
816
816
817 If `value` starts with `re:`, the remainder of the value is treated as
817 If `value` starts with `re:`, the remainder of the value is treated as
818 a regular expression. To match a value that actually starts with `re:`,
818 a regular expression. To match a value that actually starts with `re:`,
819 use the prefix `literal:`.
819 use the prefix `literal:`.
820 """
820 """
821
821
822 # i18n: "extra" is a keyword
822 # i18n: "extra" is a keyword
823 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
823 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
824 # i18n: "extra" is a keyword
824 # i18n: "extra" is a keyword
825 label = getstring(l[0], _('first argument to extra must be a string'))
825 label = getstring(l[0], _('first argument to extra must be a string'))
826 value = None
826 value = None
827
827
828 if len(l) > 1:
828 if len(l) > 1:
829 # i18n: "extra" is a keyword
829 # i18n: "extra" is a keyword
830 value = getstring(l[1], _('second argument to extra must be a string'))
830 value = getstring(l[1], _('second argument to extra must be a string'))
831 kind, value, matcher = _stringmatcher(value)
831 kind, value, matcher = _stringmatcher(value)
832
832
833 def _matchvalue(r):
833 def _matchvalue(r):
834 extra = repo[r].extra()
834 extra = repo[r].extra()
835 return label in extra and (value is None or matcher(extra[label]))
835 return label in extra and (value is None or matcher(extra[label]))
836
836
837 return subset.filter(lambda r: _matchvalue(r))
837 return subset.filter(lambda r: _matchvalue(r))
838
838
839 def filelog(repo, subset, x):
839 def filelog(repo, subset, x):
840 """``filelog(pattern)``
840 """``filelog(pattern)``
841 Changesets connected to the specified filelog.
841 Changesets connected to the specified filelog.
842
842
843 For performance reasons, visits only revisions mentioned in the file-level
843 For performance reasons, visits only revisions mentioned in the file-level
844 filelog, rather than filtering through all changesets (much faster, but
844 filelog, rather than filtering through all changesets (much faster, but
845 doesn't include deletes or duplicate changes). For a slower, more accurate
845 doesn't include deletes or duplicate changes). For a slower, more accurate
846 result, use ``file()``.
846 result, use ``file()``.
847
847
848 The pattern without explicit kind like ``glob:`` is expected to be
848 The pattern without explicit kind like ``glob:`` is expected to be
849 relative to the current directory and match against a file exactly
849 relative to the current directory and match against a file exactly
850 for efficiency.
850 for efficiency.
851
851
852 If some linkrev points to revisions filtered by the current repoview, we'll
852 If some linkrev points to revisions filtered by the current repoview, we'll
853 work around it to return a non-filtered value.
853 work around it to return a non-filtered value.
854 """
854 """
855
855
856 # i18n: "filelog" is a keyword
856 # i18n: "filelog" is a keyword
857 pat = getstring(x, _("filelog requires a pattern"))
857 pat = getstring(x, _("filelog requires a pattern"))
858 s = set()
858 s = set()
859 cl = repo.changelog
859 cl = repo.changelog
860
860
861 if not matchmod.patkind(pat):
861 if not matchmod.patkind(pat):
862 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
862 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
863 files = [f]
863 files = [f]
864 else:
864 else:
865 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
865 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
866 files = (f for f in repo[None] if m(f))
866 files = (f for f in repo[None] if m(f))
867
867
868 for f in files:
868 for f in files:
869 backrevref = {} # final value for: filerev -> changerev
869 backrevref = {} # final value for: filerev -> changerev
870 lowestchild = {} # lowest known filerev child of a filerev
870 lowestchild = {} # lowest known filerev child of a filerev
871 delayed = [] # filerev with filtered linkrev, for post-processing
871 delayed = [] # filerev with filtered linkrev, for post-processing
872 lowesthead = None # cache for manifest content of all head revisions
872 lowesthead = None # cache for manifest content of all head revisions
873 fl = repo.file(f)
873 fl = repo.file(f)
874 for fr in list(fl):
874 for fr in list(fl):
875 rev = fl.linkrev(fr)
875 rev = fl.linkrev(fr)
876 if rev not in cl:
876 if rev not in cl:
877 # changerev pointed in linkrev is filtered
877 # changerev pointed in linkrev is filtered
878 # record it for post processing.
878 # record it for post processing.
879 delayed.append((fr, rev))
879 delayed.append((fr, rev))
880 continue
880 continue
881 for p in fl.parentrevs(fr):
881 for p in fl.parentrevs(fr):
882 if 0 <= p and p not in lowestchild:
882 if 0 <= p and p not in lowestchild:
883 lowestchild[p] = fr
883 lowestchild[p] = fr
884 backrevref[fr] = rev
884 backrevref[fr] = rev
885 s.add(rev)
885 s.add(rev)
886
886
887 # Post-processing of all filerevs we skipped because they were
887 # Post-processing of all filerevs we skipped because they were
888 # filtered. If such filerevs have known and unfiltered children, this
888 # filtered. If such filerevs have known and unfiltered children, this
889 # means they have an unfiltered appearance out there. We'll use linkrev
889 # means they have an unfiltered appearance out there. We'll use linkrev
890 # adjustment to find one of these appearances. The lowest known child
890 # adjustment to find one of these appearances. The lowest known child
891 # will be used as a starting point because it is the best upper-bound we
891 # will be used as a starting point because it is the best upper-bound we
892 # have.
892 # have.
893 #
893 #
894 # This approach will fail when an unfiltered but linkrev-shadowed
894 # This approach will fail when an unfiltered but linkrev-shadowed
895 # appearance exists in a head changeset without unfiltered filerev
895 # appearance exists in a head changeset without unfiltered filerev
896 # children anywhere.
896 # children anywhere.
897 while delayed:
897 while delayed:
898 # must be a descending iteration. To slowly fill lowest child
898 # must be a descending iteration. To slowly fill lowest child
899 # information that is of potential use by the next item.
899 # information that is of potential use by the next item.
900 fr, rev = delayed.pop()
900 fr, rev = delayed.pop()
901 lkr = rev
901 lkr = rev
902
902
903 child = lowestchild.get(fr)
903 child = lowestchild.get(fr)
904
904
905 if child is None:
905 if child is None:
906 # search for existence of this file revision in a head revision.
906 # search for existence of this file revision in a head revision.
907 # There are three possibilities:
907 # There are three possibilities:
908 # - the revision exists in a head and we can find an
908 # - the revision exists in a head and we can find an
909 # introduction from there,
909 # introduction from there,
910 # - the revision does not exist in a head because it has been
910 # - the revision does not exist in a head because it has been
911 # changed since its introduction: we would have found a child
911 # changed since its introduction: we would have found a child
912 # and be in the other 'else' clause,
912 # and be in the other 'else' clause,
913 # - all versions of the revision are hidden.
913 # - all versions of the revision are hidden.
914 if lowesthead is None:
914 if lowesthead is None:
915 lowesthead = {}
915 lowesthead = {}
916 for h in repo.heads():
916 for h in repo.heads():
917 fnode = repo[h].manifest().get(f)
917 fnode = repo[h].manifest().get(f)
918 if fnode is not None:
918 if fnode is not None:
919 lowesthead[fl.rev(fnode)] = h
919 lowesthead[fl.rev(fnode)] = h
920 headrev = lowesthead.get(fr)
920 headrev = lowesthead.get(fr)
921 if headrev is None:
921 if headrev is None:
922 # content is nowhere unfiltered
922 # content is nowhere unfiltered
923 continue
923 continue
924 rev = repo[headrev][f].introrev()
924 rev = repo[headrev][f].introrev()
925 else:
925 else:
926 # the lowest known child is a good upper bound
926 # the lowest known child is a good upper bound
927 childcrev = backrevref[child]
927 childcrev = backrevref[child]
928 # XXX this does not guarantee returning the lowest
928 # XXX this does not guarantee returning the lowest
929 # introduction of this revision, but this gives a
929 # introduction of this revision, but this gives a
930 # result which is a good start and will fit in most
930 # result which is a good start and will fit in most
931 # cases. We probably need to fix the multiple
931 # cases. We probably need to fix the multiple
932 # introductions case properly (report each
932 # introductions case properly (report each
933 # introduction, even for identical file revisions)
933 # introduction, even for identical file revisions)
934 # once and for all at some point anyway.
934 # once and for all at some point anyway.
935 for p in repo[childcrev][f].parents():
935 for p in repo[childcrev][f].parents():
936 if p.filerev() == fr:
936 if p.filerev() == fr:
937 rev = p.rev()
937 rev = p.rev()
938 break
938 break
939 if rev == lkr: # no shadowed entry found
939 if rev == lkr: # no shadowed entry found
940 # XXX This should never happen unless some manifest points
940 # XXX This should never happen unless some manifest points
941 # to biggish file revisions (like a revision that uses a
941 # to biggish file revisions (like a revision that uses a
942 # parent that never appears in the manifest ancestors)
942 # parent that never appears in the manifest ancestors)
943 continue
943 continue
944
944
945 # Fill the data for the next iteration.
945 # Fill the data for the next iteration.
946 for p in fl.parentrevs(fr):
946 for p in fl.parentrevs(fr):
947 if 0 <= p and p not in lowestchild:
947 if 0 <= p and p not in lowestchild:
948 lowestchild[p] = fr
948 lowestchild[p] = fr
949 backrevref[fr] = rev
949 backrevref[fr] = rev
950 s.add(rev)
950 s.add(rev)
951
951
952 return subset & s
952 return subset & s
953
953
954 def first(repo, subset, x):
954 def first(repo, subset, x):
955 """``first(set, [n])``
955 """``first(set, [n])``
956 An alias for limit().
956 An alias for limit().
957 """
957 """
958 return limit(repo, subset, x)
958 return limit(repo, subset, x)
959
959
960 def _follow(repo, subset, x, name, followfirst=False):
960 def _follow(repo, subset, x, name, followfirst=False):
961 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
961 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
962 c = repo['.']
962 c = repo['.']
963 if l:
963 if l:
964 x = getstring(l[0], _("%s expected a filename") % name)
964 x = getstring(l[0], _("%s expected a filename") % name)
965 if x in c:
965 if x in c:
966 cx = c[x]
966 cx = c[x]
967 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
967 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
968 # include the revision responsible for the most recent version
968 # include the revision responsible for the most recent version
969 s.add(cx.introrev())
969 s.add(cx.introrev())
970 else:
970 else:
971 return baseset()
971 return baseset()
972 else:
972 else:
973 s = _revancestors(repo, baseset([c.rev()]), followfirst)
973 s = _revancestors(repo, baseset([c.rev()]), followfirst)
974
974
975 return subset & s
975 return subset & s
976
976
977 def follow(repo, subset, x):
977 def follow(repo, subset, x):
978 """``follow([file])``
978 """``follow([file])``
979 An alias for ``::.`` (ancestors of the working copy's first parent).
979 An alias for ``::.`` (ancestors of the working copy's first parent).
980 If a filename is specified, the history of the given file is followed,
980 If a filename is specified, the history of the given file is followed,
981 including copies.
981 including copies.
982 """
982 """
983 return _follow(repo, subset, x, 'follow')
983 return _follow(repo, subset, x, 'follow')
984
984
985 def _followfirst(repo, subset, x):
985 def _followfirst(repo, subset, x):
986 # ``followfirst([file])``
986 # ``followfirst([file])``
987 # Like ``follow([file])`` but follows only the first parent of
987 # Like ``follow([file])`` but follows only the first parent of
988 # every revision or file revision.
988 # every revision or file revision.
989 return _follow(repo, subset, x, '_followfirst', followfirst=True)
989 return _follow(repo, subset, x, '_followfirst', followfirst=True)
990
990
991 def getall(repo, subset, x):
991 def getall(repo, subset, x):
992 """``all()``
992 """``all()``
993 All changesets, the same as ``0:tip``.
993 All changesets, the same as ``0:tip``.
994 """
994 """
995 # i18n: "all" is a keyword
995 # i18n: "all" is a keyword
996 getargs(x, 0, 0, _("all takes no arguments"))
996 getargs(x, 0, 0, _("all takes no arguments"))
997 return subset
997 return subset
998
998
999 def grep(repo, subset, x):
999 def grep(repo, subset, x):
1000 """``grep(regex)``
1000 """``grep(regex)``
1001 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1001 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1002 to ensure special escape characters are handled correctly. Unlike
1002 to ensure special escape characters are handled correctly. Unlike
1003 ``keyword(string)``, the match is case-sensitive.
1003 ``keyword(string)``, the match is case-sensitive.
1004 """
1004 """
1005 try:
1005 try:
1006 # i18n: "grep" is a keyword
1006 # i18n: "grep" is a keyword
1007 gr = re.compile(getstring(x, _("grep requires a string")))
1007 gr = re.compile(getstring(x, _("grep requires a string")))
1008 except re.error, e:
1008 except re.error, e:
1009 raise error.ParseError(_('invalid match pattern: %s') % e)
1009 raise error.ParseError(_('invalid match pattern: %s') % e)
1010
1010
1011 def matches(x):
1011 def matches(x):
1012 c = repo[x]
1012 c = repo[x]
1013 for e in c.files() + [c.user(), c.description()]:
1013 for e in c.files() + [c.user(), c.description()]:
1014 if gr.search(e):
1014 if gr.search(e):
1015 return True
1015 return True
1016 return False
1016 return False
1017
1017
1018 return subset.filter(matches)
1018 return subset.filter(matches)
1019
1019
1020 def _matchfiles(repo, subset, x):
1020 def _matchfiles(repo, subset, x):
1021 # _matchfiles takes a revset list of prefixed arguments:
1021 # _matchfiles takes a revset list of prefixed arguments:
1022 #
1022 #
1023 # [p:foo, i:bar, x:baz]
1023 # [p:foo, i:bar, x:baz]
1024 #
1024 #
1025 # builds a match object from them and filters subset. Allowed
1025 # builds a match object from them and filters subset. Allowed
1026 # prefixes are 'p:' for regular patterns, 'i:' for include
1026 # prefixes are 'p:' for regular patterns, 'i:' for include
1027 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1027 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1028 # a revision identifier, or the empty string to reference the
1028 # a revision identifier, or the empty string to reference the
1029 # working directory, from which the match object is
1029 # working directory, from which the match object is
1030 # initialized. Use 'd:' to set the default matching mode, default
1030 # initialized. Use 'd:' to set the default matching mode, default
1031 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1031 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1032
1032
1033 # i18n: "_matchfiles" is a keyword
1033 # i18n: "_matchfiles" is a keyword
1034 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1034 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1035 pats, inc, exc = [], [], []
1035 pats, inc, exc = [], [], []
1036 rev, default = None, None
1036 rev, default = None, None
1037 for arg in l:
1037 for arg in l:
1038 # i18n: "_matchfiles" is a keyword
1038 # i18n: "_matchfiles" is a keyword
1039 s = getstring(arg, _("_matchfiles requires string arguments"))
1039 s = getstring(arg, _("_matchfiles requires string arguments"))
1040 prefix, value = s[:2], s[2:]
1040 prefix, value = s[:2], s[2:]
1041 if prefix == 'p:':
1041 if prefix == 'p:':
1042 pats.append(value)
1042 pats.append(value)
1043 elif prefix == 'i:':
1043 elif prefix == 'i:':
1044 inc.append(value)
1044 inc.append(value)
1045 elif prefix == 'x:':
1045 elif prefix == 'x:':
1046 exc.append(value)
1046 exc.append(value)
1047 elif prefix == 'r:':
1047 elif prefix == 'r:':
1048 if rev is not None:
1048 if rev is not None:
1049 # i18n: "_matchfiles" is a keyword
1049 # i18n: "_matchfiles" is a keyword
1050 raise error.ParseError(_('_matchfiles expected at most one '
1050 raise error.ParseError(_('_matchfiles expected at most one '
1051 'revision'))
1051 'revision'))
1052 if value != '': # empty means working directory; leave rev as None
1052 if value != '': # empty means working directory; leave rev as None
1053 rev = value
1053 rev = value
1054 elif prefix == 'd:':
1054 elif prefix == 'd:':
1055 if default is not None:
1055 if default is not None:
1056 # i18n: "_matchfiles" is a keyword
1056 # i18n: "_matchfiles" is a keyword
1057 raise error.ParseError(_('_matchfiles expected at most one '
1057 raise error.ParseError(_('_matchfiles expected at most one '
1058 'default mode'))
1058 'default mode'))
1059 default = value
1059 default = value
1060 else:
1060 else:
1061 # i18n: "_matchfiles" is a keyword
1061 # i18n: "_matchfiles" is a keyword
1062 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1062 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1063 if not default:
1063 if not default:
1064 default = 'glob'
1064 default = 'glob'
1065
1065
1066 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1066 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1067 exclude=exc, ctx=repo[rev], default=default)
1067 exclude=exc, ctx=repo[rev], default=default)
1068
1068
1069 def matches(x):
1069 def matches(x):
1070 for f in repo[x].files():
1070 for f in repo[x].files():
1071 if m(f):
1071 if m(f):
1072 return True
1072 return True
1073 return False
1073 return False
1074
1074
1075 return subset.filter(matches)
1075 return subset.filter(matches)
1076
1076
1077 def hasfile(repo, subset, x):
1077 def hasfile(repo, subset, x):
1078 """``file(pattern)``
1078 """``file(pattern)``
1079 Changesets affecting files matched by pattern.
1079 Changesets affecting files matched by pattern.
1080
1080
1081 For a faster but less accurate result, consider using ``filelog()``
1081 For a faster but less accurate result, consider using ``filelog()``
1082 instead.
1082 instead.
1083
1083
1084 This predicate uses ``glob:`` as the default kind of pattern.
1084 This predicate uses ``glob:`` as the default kind of pattern.
1085 """
1085 """
1086 # i18n: "file" is a keyword
1086 # i18n: "file" is a keyword
1087 pat = getstring(x, _("file requires a pattern"))
1087 pat = getstring(x, _("file requires a pattern"))
1088 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1088 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1089
1089
1090 def head(repo, subset, x):
1090 def head(repo, subset, x):
1091 """``head()``
1091 """``head()``
1092 Changeset is a named branch head.
1092 Changeset is a named branch head.
1093 """
1093 """
1094 # i18n: "head" is a keyword
1094 # i18n: "head" is a keyword
1095 getargs(x, 0, 0, _("head takes no arguments"))
1095 getargs(x, 0, 0, _("head takes no arguments"))
1096 hs = set()
1096 hs = set()
1097 for b, ls in repo.branchmap().iteritems():
1097 for b, ls in repo.branchmap().iteritems():
1098 hs.update(repo[h].rev() for h in ls)
1098 hs.update(repo[h].rev() for h in ls)
1099 return baseset(hs).filter(subset.__contains__)
1099 return baseset(hs).filter(subset.__contains__)
1100
1100
1101 def heads(repo, subset, x):
1101 def heads(repo, subset, x):
1102 """``heads(set)``
1102 """``heads(set)``
1103 Members of set with no children in set.
1103 Members of set with no children in set.
1104 """
1104 """
1105 s = getset(repo, subset, x)
1105 s = getset(repo, subset, x)
1106 ps = parents(repo, subset, x)
1106 ps = parents(repo, subset, x)
1107 return s - ps
1107 return s - ps
1108
1108
1109 def hidden(repo, subset, x):
1109 def hidden(repo, subset, x):
1110 """``hidden()``
1110 """``hidden()``
1111 Hidden changesets.
1111 Hidden changesets.
1112 """
1112 """
1113 # i18n: "hidden" is a keyword
1113 # i18n: "hidden" is a keyword
1114 getargs(x, 0, 0, _("hidden takes no arguments"))
1114 getargs(x, 0, 0, _("hidden takes no arguments"))
1115 hiddenrevs = repoview.filterrevs(repo, 'visible')
1115 hiddenrevs = repoview.filterrevs(repo, 'visible')
1116 return subset & hiddenrevs
1116 return subset & hiddenrevs
1117
1117
1118 def keyword(repo, subset, x):
1118 def keyword(repo, subset, x):
1119 """``keyword(string)``
1119 """``keyword(string)``
1120 Search commit message, user name, and names of changed files for
1120 Search commit message, user name, and names of changed files for
1121 string. The match is case-insensitive.
1121 string. The match is case-insensitive.
1122 """
1122 """
1123 # i18n: "keyword" is a keyword
1123 # i18n: "keyword" is a keyword
1124 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1124 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1125
1125
1126 def matches(r):
1126 def matches(r):
1127 c = repo[r]
1127 c = repo[r]
1128 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1128 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1129 c.description()])
1129 c.description()])
1130
1130
1131 return subset.filter(matches)
1131 return subset.filter(matches)
1132
1132
1133 def limit(repo, subset, x):
1133 def limit(repo, subset, x):
1134 """``limit(set, [n])``
1134 """``limit(set, [n])``
1135 First n members of set, defaulting to 1.
1135 First n members of set, defaulting to 1.
1136 """
1136 """
1137 # i18n: "limit" is a keyword
1137 # i18n: "limit" is a keyword
1138 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1138 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1139 try:
1139 try:
1140 lim = 1
1140 lim = 1
1141 if len(l) == 2:
1141 if len(l) == 2:
1142 # i18n: "limit" is a keyword
1142 # i18n: "limit" is a keyword
1143 lim = int(getstring(l[1], _("limit requires a number")))
1143 lim = int(getstring(l[1], _("limit requires a number")))
1144 except (TypeError, ValueError):
1144 except (TypeError, ValueError):
1145 # i18n: "limit" is a keyword
1145 # i18n: "limit" is a keyword
1146 raise error.ParseError(_("limit expects a number"))
1146 raise error.ParseError(_("limit expects a number"))
1147 ss = subset
1147 ss = subset
1148 os = getset(repo, spanset(repo), l[0])
1148 os = getset(repo, spanset(repo), l[0])
1149 result = []
1149 result = []
1150 it = iter(os)
1150 it = iter(os)
1151 for x in xrange(lim):
1151 for x in xrange(lim):
1152 try:
1152 try:
1153 y = it.next()
1153 y = it.next()
1154 if y in ss:
1154 if y in ss:
1155 result.append(y)
1155 result.append(y)
1156 except (StopIteration):
1156 except (StopIteration):
1157 break
1157 break
1158 return baseset(result)
1158 return baseset(result)
1159
1159
1160 def last(repo, subset, x):
1160 def last(repo, subset, x):
1161 """``last(set, [n])``
1161 """``last(set, [n])``
1162 Last n members of set, defaulting to 1.
1162 Last n members of set, defaulting to 1.
1163 """
1163 """
1164 # i18n: "last" is a keyword
1164 # i18n: "last" is a keyword
1165 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1165 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1166 try:
1166 try:
1167 lim = 1
1167 lim = 1
1168 if len(l) == 2:
1168 if len(l) == 2:
1169 # i18n: "last" is a keyword
1169 # i18n: "last" is a keyword
1170 lim = int(getstring(l[1], _("last requires a number")))
1170 lim = int(getstring(l[1], _("last requires a number")))
1171 except (TypeError, ValueError):
1171 except (TypeError, ValueError):
1172 # i18n: "last" is a keyword
1172 # i18n: "last" is a keyword
1173 raise error.ParseError(_("last expects a number"))
1173 raise error.ParseError(_("last expects a number"))
1174 ss = subset
1174 ss = subset
1175 os = getset(repo, spanset(repo), l[0])
1175 os = getset(repo, spanset(repo), l[0])
1176 os.reverse()
1176 os.reverse()
1177 result = []
1177 result = []
1178 it = iter(os)
1178 it = iter(os)
1179 for x in xrange(lim):
1179 for x in xrange(lim):
1180 try:
1180 try:
1181 y = it.next()
1181 y = it.next()
1182 if y in ss:
1182 if y in ss:
1183 result.append(y)
1183 result.append(y)
1184 except (StopIteration):
1184 except (StopIteration):
1185 break
1185 break
1186 return baseset(result)
1186 return baseset(result)
1187
1187
1188 def maxrev(repo, subset, x):
1188 def maxrev(repo, subset, x):
1189 """``max(set)``
1189 """``max(set)``
1190 Changeset with highest revision number in set.
1190 Changeset with highest revision number in set.
1191 """
1191 """
1192 os = getset(repo, spanset(repo), x)
1192 os = getset(repo, spanset(repo), x)
1193 if os:
1193 if os:
1194 m = os.max()
1194 m = os.max()
1195 if m in subset:
1195 if m in subset:
1196 return baseset([m])
1196 return baseset([m])
1197 return baseset()
1197 return baseset()
1198
1198
1199 def merge(repo, subset, x):
1199 def merge(repo, subset, x):
1200 """``merge()``
1200 """``merge()``
1201 Changeset is a merge changeset.
1201 Changeset is a merge changeset.
1202 """
1202 """
1203 # i18n: "merge" is a keyword
1203 # i18n: "merge" is a keyword
1204 getargs(x, 0, 0, _("merge takes no arguments"))
1204 getargs(x, 0, 0, _("merge takes no arguments"))
1205 cl = repo.changelog
1205 cl = repo.changelog
1206 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1206 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1207
1207
1208 def branchpoint(repo, subset, x):
1208 def branchpoint(repo, subset, x):
1209 """``branchpoint()``
1209 """``branchpoint()``
1210 Changesets with more than one child.
1210 Changesets with more than one child.
1211 """
1211 """
1212 # i18n: "branchpoint" is a keyword
1212 # i18n: "branchpoint" is a keyword
1213 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1213 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1214 cl = repo.changelog
1214 cl = repo.changelog
1215 if not subset:
1215 if not subset:
1216 return baseset()
1216 return baseset()
1217 baserev = min(subset)
1217 baserev = min(subset)
1218 parentscount = [0]*(len(repo) - baserev)
1218 parentscount = [0]*(len(repo) - baserev)
1219 for r in cl.revs(start=baserev + 1):
1219 for r in cl.revs(start=baserev + 1):
1220 for p in cl.parentrevs(r):
1220 for p in cl.parentrevs(r):
1221 if p >= baserev:
1221 if p >= baserev:
1222 parentscount[p - baserev] += 1
1222 parentscount[p - baserev] += 1
1223 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1223 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1224
1224
1225 def minrev(repo, subset, x):
1225 def minrev(repo, subset, x):
1226 """``min(set)``
1226 """``min(set)``
1227 Changeset with lowest revision number in set.
1227 Changeset with lowest revision number in set.
1228 """
1228 """
1229 os = getset(repo, spanset(repo), x)
1229 os = getset(repo, spanset(repo), x)
1230 if os:
1230 if os:
1231 m = os.min()
1231 m = os.min()
1232 if m in subset:
1232 if m in subset:
1233 return baseset([m])
1233 return baseset([m])
1234 return baseset()
1234 return baseset()
1235
1235
1236 def modifies(repo, subset, x):
1236 def modifies(repo, subset, x):
1237 """``modifies(pattern)``
1237 """``modifies(pattern)``
1238 Changesets modifying files matched by pattern.
1238 Changesets modifying files matched by pattern.
1239
1239
1240 The pattern without explicit kind like ``glob:`` is expected to be
1240 The pattern without explicit kind like ``glob:`` is expected to be
1241 relative to the current directory and match against a file or a
1241 relative to the current directory and match against a file or a
1242 directory.
1242 directory.
1243 """
1243 """
1244 # i18n: "modifies" is a keyword
1244 # i18n: "modifies" is a keyword
1245 pat = getstring(x, _("modifies requires a pattern"))
1245 pat = getstring(x, _("modifies requires a pattern"))
1246 return checkstatus(repo, subset, pat, 0)
1246 return checkstatus(repo, subset, pat, 0)
1247
1247
1248 def named(repo, subset, x):
1248 def named(repo, subset, x):
1249 """``named(namespace)``
1249 """``named(namespace)``
1250 The changesets in a given namespace.
1250 The changesets in a given namespace.
1251
1251
1252 If `namespace` starts with `re:`, the remainder of the string is treated as
1252 If `namespace` starts with `re:`, the remainder of the string is treated as
1253 a regular expression. To match a namespace that actually starts with `re:`,
1253 a regular expression. To match a namespace that actually starts with `re:`,
1254 use the prefix `literal:`.
1254 use the prefix `literal:`.
1255 """
1255 """
1256 # i18n: "named" is a keyword
1256 # i18n: "named" is a keyword
1257 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1257 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1258
1258
1259 ns = getstring(args[0],
1259 ns = getstring(args[0],
1260 # i18n: "named" is a keyword
1260 # i18n: "named" is a keyword
1261 _('the argument to named must be a string'))
1261 _('the argument to named must be a string'))
1262 kind, pattern, matcher = _stringmatcher(ns)
1262 kind, pattern, matcher = _stringmatcher(ns)
1263 namespaces = set()
1263 namespaces = set()
1264 if kind == 'literal':
1264 if kind == 'literal':
1265 if pattern not in repo.names:
1265 if pattern not in repo.names:
1266 raise error.RepoLookupError(_("namespace '%s' does not exist")
1266 raise error.RepoLookupError(_("namespace '%s' does not exist")
1267 % ns)
1267 % ns)
1268 namespaces.add(repo.names[pattern])
1268 namespaces.add(repo.names[pattern])
1269 else:
1269 else:
1270 for name, ns in repo.names.iteritems():
1270 for name, ns in repo.names.iteritems():
1271 if matcher(name):
1271 if matcher(name):
1272 namespaces.add(ns)
1272 namespaces.add(ns)
1273 if not namespaces:
1273 if not namespaces:
1274 raise error.RepoLookupError(_("no namespace exists"
1274 raise error.RepoLookupError(_("no namespace exists"
1275 " that match '%s'") % pattern)
1275 " that match '%s'") % pattern)
1276
1276
1277 names = set()
1277 names = set()
1278 for ns in namespaces:
1278 for ns in namespaces:
1279 for name in ns.listnames(repo):
1279 for name in ns.listnames(repo):
1280 if name not in ns.deprecated:
1280 if name not in ns.deprecated:
1281 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1281 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1282
1282
1283 names -= set([node.nullrev])
1283 names -= set([node.nullrev])
1284 return subset & names
1284 return subset & names
1285
1285
1286 def node_(repo, subset, x):
1286 def node_(repo, subset, x):
1287 """``id(string)``
1287 """``id(string)``
1288 Revision non-ambiguously specified by the given hex string prefix.
1288 Revision non-ambiguously specified by the given hex string prefix.
1289 """
1289 """
1290 # i18n: "id" is a keyword
1290 # i18n: "id" is a keyword
1291 l = getargs(x, 1, 1, _("id requires one argument"))
1291 l = getargs(x, 1, 1, _("id requires one argument"))
1292 # i18n: "id" is a keyword
1292 # i18n: "id" is a keyword
1293 n = getstring(l[0], _("id requires a string"))
1293 n = getstring(l[0], _("id requires a string"))
1294 if len(n) == 40:
1294 if len(n) == 40:
1295 rn = repo[n].rev()
1295 rn = repo[n].rev()
1296 else:
1296 else:
1297 rn = None
1297 rn = None
1298 pm = repo.changelog._partialmatch(n)
1298 pm = repo.changelog._partialmatch(n)
1299 if pm is not None:
1299 if pm is not None:
1300 rn = repo.changelog.rev(pm)
1300 rn = repo.changelog.rev(pm)
1301
1301
1302 if rn is None:
1302 if rn is None:
1303 return baseset()
1303 return baseset()
1304 result = baseset([rn])
1304 result = baseset([rn])
1305 return result & subset
1305 return result & subset
1306
1306
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, spanset(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, spanset(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results

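# Illustrative sketch, not part of the original revset.py: what
# only(a, b) computes on a toy DAG, expressed as plain ancestor sets.
# The parents mapping and the ancestors() helper are hypothetical.
def _onlydemo():
    parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2, 3]}
    def ancestors(rev):
        seen, todo = set(), [rev]
        while todo:
            r = todo.pop()
            if r not in seen:
                seen.add(r)
                todo.extend(parents[r])
        return seen
    # only(4, 3) is ::4 - ::3
    assert ancestors(4) - ancestors(3) == set([4, 2])
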
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, spanset(repo), x)
    else:
        dests = getall(repo, spanset(repo), x)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    return subset & o

def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, spanset(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    return subset & ps

def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, spanset(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    return subset & ps

def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, spanset(repo), x):
            ps.update(cl.parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps

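# Illustrative sketch, not part of the original revset.py: parentrevs()
# reports a missing second parent as nullrev (-1); p1()/p2()/parents()
# above collect parent revisions and then discard nullrev, as mimicked
# here with a hypothetical parentrevs table.
def _parentsdemo():
    nullrev = -1
    parentrevs = {2: (1, nullrev), 3: (1, 2)}
    ps = set()
    for r in (2, 3):
        ps.update(parentrevs[r])
    ps -= set([nullrev])
    assert ps == set([1, 2])
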
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, cache=False)

def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
        if q == '.':
            q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in fullreposet(repo) and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)

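# Illustrative sketch, not part of the original revset.py: the
# cost-ordering step inside matching() sorts the requested fields so
# that cheap comparisons run first and unknown fields fall to the end;
# this mirrors fieldkeyfunc above on a made-up field list.
def _fieldorderdemo():
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            return len(fieldorder) # unknown fields are assumed costly
    fields = ['diff', 'user', 'mystery', 'date']
    assert sorted(fields, key=fieldkeyfunc) == ['user', 'date', 'diff',
                                                'mystery']
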
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l

def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, spanset(repo), x)
    subset = baseset([r for r in s if r in subset])
    cs = _children(repo, subset, s)
    return subset - cs

def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.secret
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, cache=False)

def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])

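# Illustrative sketch, not part of the original revset.py: the invert()
# helper inside sort() maps every byte to its complement so that an
# ascending sort on inverted keys yields descending order for the
# original strings (prefix ties aside). The branch names are made up.
def _invertdemo():
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    names = ['default', 'stable', 'crew']
    assert sorted(names, key=invert) == sorted(names, reverse=True)
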
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error, e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__

def _substringmatcher(pattern):
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

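# Illustrative sketch, not part of the original revset.py: how the
# 're:'/'literal:' prefix convention plays out for substring matching,
# using only the _substringmatcher helper defined above.
def _substringmatcherdemo():
    kind, pattern, matcher = _substringmatcher('lib')
    assert (kind, pattern) == ('literal', 'lib')
    assert matcher('libfoo') and not matcher('bar')
    kind, pattern, matcher = _substringmatcher('re:li.b')
    assert kind == 're' and matcher('found a lizb here')
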
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

def tagged(repo, subset, x):
    return tag(repo, subset, x)

def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)

# for internal use
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [repo[r].rev() for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
def _intlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
def _hexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "_list",
    "_intlist",
    "_hexlist",
    ])

methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
    "only": only,
    "onlypost": only,
}

def optimize(x, small):
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x

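# Illustrative sketch, not part of the original revset.py: feeding a
# hand-built parse tree for "::a and not ::b" through optimize() should
# produce the cheaper only(a, b) form that the fast-path comment above
# describes. The symbols 'a' and 'b' are arbitrary placeholders.
def _optimizeonlydemo():
    tree = ('and',
            ('func', ('symbol', 'ancestors'), ('symbol', 'a')),
            ('not', ('func', ('symbol', 'ancestors'), ('symbol', 'b'))))
    w, optimized = optimize(tree, False)
    assert optimized == ('func', ('symbol', 'only'),
                         ('list', ('symbol', 'a'), ('symbol', 'b')))
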
_aliasarg = ('func', ('symbol', '_aliasarg'))
def _getaliasarg(tree):
    """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
    return X, None otherwise.
    """
    if (len(tree) == 3 and tree[:2] == _aliasarg
        and tree[2][0] == 'string'):
        return tree[2][1]
    return None

def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if isinstance(tree, tuple):
        arg = _getaliasarg(tree)
        if arg is not None and (not known or arg not in known):
            raise error.ParseError(_("not a function: %s") % '_aliasarg')
        for t in tree:
            _checkaliasarg(t, known)

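# Illustrative sketch, not part of the original revset.py: alias
# arguments are smuggled through the parser as _aliasarg('...') calls,
# and _getaliasarg() above recovers the placeholder name from such a
# tree (returning None for anything else).
def _aliasargdemo():
    tree = ('func', ('symbol', '_aliasarg'), ('string', '$1'))
    assert _getaliasarg(tree) == '$1'
    assert _getaliasarg(('symbol', 'tip')) is None
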
2160 # the set of valid characters for the initial letter of symbols in
2160 # the set of valid characters for the initial letter of symbols in
2161 # alias declarations and definitions
2161 # alias declarations and definitions
2162 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2162 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2163 if c.isalnum() or c in '._@$' or ord(c) > 127)
2163 if c.isalnum() or c in '._@$' or ord(c) > 127)
2164
2164
2165 def _tokenizealias(program, lookup=None):
2165 def _tokenizealias(program, lookup=None):
2166 """Parse alias declaration/definition into a stream of tokens
2166 """Parse alias declaration/definition into a stream of tokens
2167
2167
2168 This allows symbol names to use also ``$`` as an initial letter
2168 This allows symbol names to use also ``$`` as an initial letter
2169 (for backward compatibility), and callers of this function should
2169 (for backward compatibility), and callers of this function should
2170 examine whether ``$`` is used also for unexpected symbols or not.
2170 examine whether ``$`` is used also for unexpected symbols or not.
2171 """
2171 """
2172 return tokenize(program, lookup=lookup,
2172 return tokenize(program, lookup=lookup,
2173 syminitletters=_aliassyminitletters)
2173 syminitletters=_aliassyminitletters)
2174
2174
2175 def _parsealiasdecl(decl):
2175 def _parsealiasdecl(decl):
2176 """Parse alias declaration ``decl``
2176 """Parse alias declaration ``decl``
2177
2177
2178 This returns ``(name, tree, args, errorstr)`` tuple:
2178 This returns ``(name, tree, args, errorstr)`` tuple:
2179
2179
2180 - ``name``: of declared alias (may be ``decl`` itself at error)
2180 - ``name``: of declared alias (may be ``decl`` itself at error)
2181 - ``tree``: parse result (or ``None`` at error)
2181 - ``tree``: parse result (or ``None`` at error)
2182 - ``args``: list of alias argument names (or None for symbol declaration)
2182 - ``args``: list of alias argument names (or None for symbol declaration)
2183 - ``errorstr``: detail about detected error (or None)
2183 - ``errorstr``: detail about detected error (or None)
2184
2184
2185 >>> _parsealiasdecl('foo')
2185 >>> _parsealiasdecl('foo')
2186 ('foo', ('symbol', 'foo'), None, None)
2186 ('foo', ('symbol', 'foo'), None, None)
2187 >>> _parsealiasdecl('$foo')
2187 >>> _parsealiasdecl('$foo')
2188 ('$foo', None, None, "'$' not for alias arguments")
2188 ('$foo', None, None, "'$' not for alias arguments")
2189 >>> _parsealiasdecl('foo::bar')
2189 >>> _parsealiasdecl('foo::bar')
2190 ('foo::bar', None, None, 'invalid format')
2190 ('foo::bar', None, None, 'invalid format')
2191 >>> _parsealiasdecl('foo bar')
2191 >>> _parsealiasdecl('foo bar')
2192 ('foo bar', None, None, 'at 4: invalid token')
2192 ('foo bar', None, None, 'at 4: invalid token')
2193 >>> _parsealiasdecl('foo()')
2193 >>> _parsealiasdecl('foo()')
2194 ('foo', ('func', ('symbol', 'foo')), [], None)
2194 ('foo', ('func', ('symbol', 'foo')), [], None)
2195 >>> _parsealiasdecl('$foo()')
2195 >>> _parsealiasdecl('$foo()')
2196 ('$foo()', None, None, "'$' not for alias arguments")
2196 ('$foo()', None, None, "'$' not for alias arguments")
2197 >>> _parsealiasdecl('foo($1, $2)')
2197 >>> _parsealiasdecl('foo($1, $2)')
2198 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2198 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2199 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2199 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2200 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2200 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2201 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2201 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2202 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2202 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2203 >>> _parsealiasdecl('foo(bar($1, $2))')
2203 >>> _parsealiasdecl('foo(bar($1, $2))')
2204 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2204 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2205 >>> _parsealiasdecl('foo("string")')
2205 >>> _parsealiasdecl('foo("string")')
2206 ('foo("string")', None, None, 'invalid argument list')
2206 ('foo("string")', None, None, 'invalid argument list')
2207 >>> _parsealiasdecl('foo($1, $2')
2207 >>> _parsealiasdecl('foo($1, $2')
2208 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2208 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2209 >>> _parsealiasdecl('foo("string')
2209 >>> _parsealiasdecl('foo("string')
2210 ('foo("string', None, None, 'at 5: unterminated string')
2210 ('foo("string', None, None, 'at 5: unterminated string')
2211 >>> _parsealiasdecl('foo($1, $2, $1)')
2211 >>> _parsealiasdecl('foo($1, $2, $1)')
2212 ('foo', None, None, 'argument names collide with each other')
2212 ('foo', None, None, 'argument names collide with each other')
2213 """
2213 """
2214 p = parser.parser(_tokenizealias, elements)
2214 p = parser.parser(_tokenizealias, elements)
2215 try:
2215 try:
2216 tree, pos = p.parse(decl)
2216 tree, pos = p.parse(decl)
2217 if (pos != len(decl)):
2217 if (pos != len(decl)):
2218 raise error.ParseError(_('invalid token'), pos)
2218 raise error.ParseError(_('invalid token'), pos)
2219
2219
2220 if isvalidsymbol(tree):
2220 if isvalidsymbol(tree):
2221 # "name = ...." style
2221 # "name = ...." style
2222 name = getsymbol(tree)
2222 name = getsymbol(tree)
2223 if name.startswith('$'):
2223 if name.startswith('$'):
2224 return (decl, None, None, _("'$' not for alias arguments"))
2224 return (decl, None, None, _("'$' not for alias arguments"))
2225 return (name, ('symbol', name), None, None)
2225 return (name, ('symbol', name), None, None)
2226
2226
2227 if isvalidfunc(tree):
2227 if isvalidfunc(tree):
2228 # "name(arg, ....) = ...." style
2228 # "name(arg, ....) = ...." style
2229 name = getfuncname(tree)
2229 name = getfuncname(tree)
2230 if name.startswith('$'):
2230 if name.startswith('$'):
2231 return (decl, None, None, _("'$' not for alias arguments"))
2231 return (decl, None, None, _("'$' not for alias arguments"))
2232 args = []
2232 args = []
2233 for arg in getfuncargs(tree):
2233 for arg in getfuncargs(tree):
2234 if not isvalidsymbol(arg):
2234 if not isvalidsymbol(arg):
2235 return (decl, None, None, _("invalid argument list"))
2235 return (decl, None, None, _("invalid argument list"))
2236 args.append(getsymbol(arg))
2236 args.append(getsymbol(arg))
2237 if len(args) != len(set(args)):
2237 if len(args) != len(set(args)):
2238 return (name, None, None,
2238 return (name, None, None,
2239 _("argument names collide with each other"))
2239 _("argument names collide with each other"))
2240 return (name, ('func', ('symbol', name)), args, None)
2240 return (name, ('func', ('symbol', name)), args, None)
2241
2241
2242 return (decl, None, None, _("invalid format"))
2242 return (decl, None, None, _("invalid format"))
2243 except error.ParseError, inst:
2243 except error.ParseError, inst:
2244 return (decl, None, None, parseerrordetail(inst))
2244 return (decl, None, None, parseerrordetail(inst))
2245
2245
2246 class revsetalias(object):
2246 class revsetalias(object):
2247 # whether the alias's own `error` information has already been shown.
2247 # whether the alias's own `error` information has already been shown.
2248 # this avoids showing the same warning multiple times at each `findaliases`.
2248 # this avoids showing the same warning multiple times at each `findaliases`.
2249 warned = False
2249 warned = False
2250
2250
2251 def __init__(self, name, value):
2251 def __init__(self, name, value):
2252 '''Aliases like:
2252 '''Aliases like:
2253
2253
2254 h = heads(default)
2254 h = heads(default)
2255 b($1) = ancestors($1) - ancestors(default)
2255 b($1) = ancestors($1) - ancestors(default)
2256 '''
2256 '''
2257 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2257 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2258 if self.error:
2258 if self.error:
2259 self.error = _('failed to parse the declaration of revset alias'
2259 self.error = _('failed to parse the declaration of revset alias'
2260 ' "%s": %s') % (self.name, self.error)
2260 ' "%s": %s') % (self.name, self.error)
2261 return
2261 return
2262
2262
2263 if self.args:
2263 if self.args:
2264 for arg in self.args:
2264 for arg in self.args:
2265 # _aliasarg() is an unknown symbol used only to separate
2265 # _aliasarg() is an unknown symbol used only to separate
2266 # alias argument placeholders from regular strings.
2266 # alias argument placeholders from regular strings.
2267 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
2267 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
2268
2268
2269 try:
2269 try:
2270 self.replacement, pos = parse(value)
2270 self.replacement, pos = parse(value)
2271 if pos != len(value):
2271 if pos != len(value):
2272 raise error.ParseError(_('invalid token'), pos)
2272 raise error.ParseError(_('invalid token'), pos)
2273 # Check for placeholder injection
2273 # Check for placeholder injection
2274 _checkaliasarg(self.replacement, self.args)
2274 _checkaliasarg(self.replacement, self.args)
2275 except error.ParseError, inst:
2275 except error.ParseError, inst:
2276 self.error = _('failed to parse the definition of revset alias'
2276 self.error = _('failed to parse the definition of revset alias'
2277 ' "%s": %s') % (self.name, parseerrordetail(inst))
2277 ' "%s": %s') % (self.name, parseerrordetail(inst))
2278
2278
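# Editorial note (illustrative sketch, not part of the original source): the
# aliases parsed by the class above are normally declared in an hgrc
# [revsetalias] section, which findaliases() below reads via
# ui.configitems('revsetalias'). The section name is standard Mercurial
# configuration; the concrete alias bodies here are only examples:
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)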
2279 def _getalias(aliases, tree):
2279 def _getalias(aliases, tree):
2280 """If tree looks like an unexpanded alias, return it. Return None
2280 """If tree looks like an unexpanded alias, return it. Return None
2281 otherwise.
2281 otherwise.
2282 """
2282 """
2283 if isinstance(tree, tuple) and tree:
2283 if isinstance(tree, tuple) and tree:
2284 if tree[0] == 'symbol' and len(tree) == 2:
2284 if tree[0] == 'symbol' and len(tree) == 2:
2285 name = tree[1]
2285 name = tree[1]
2286 alias = aliases.get(name)
2286 alias = aliases.get(name)
2287 if alias and alias.args is None and alias.tree == tree:
2287 if alias and alias.args is None and alias.tree == tree:
2288 return alias
2288 return alias
2289 if tree[0] == 'func' and len(tree) > 1:
2289 if tree[0] == 'func' and len(tree) > 1:
2290 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2290 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2291 name = tree[1][1]
2291 name = tree[1][1]
2292 alias = aliases.get(name)
2292 alias = aliases.get(name)
2293 if alias and alias.args is not None and alias.tree == tree[:2]:
2293 if alias and alias.args is not None and alias.tree == tree[:2]:
2294 return alias
2294 return alias
2295 return None
2295 return None
2296
2296
2297 def _expandargs(tree, args):
2297 def _expandargs(tree, args):
2298 """Replace _aliasarg instances with the substitution value of the
2298 """Replace _aliasarg instances with the substitution value of the
2299 same name in args, recursively.
2299 same name in args, recursively.
2300 """
2300 """
2301 if not tree or not isinstance(tree, tuple):
2301 if not tree or not isinstance(tree, tuple):
2302 return tree
2302 return tree
2303 arg = _getaliasarg(tree)
2303 arg = _getaliasarg(tree)
2304 if arg is not None:
2304 if arg is not None:
2305 return args[arg]
2305 return args[arg]
2306 return tuple(_expandargs(t, args) for t in tree)
2306 return tuple(_expandargs(t, args) for t in tree)
2307
2307
2308 def _expandaliases(aliases, tree, expanding, cache):
2308 def _expandaliases(aliases, tree, expanding, cache):
2309 """Expand aliases in tree, recursively.
2309 """Expand aliases in tree, recursively.
2310
2310
2311 'aliases' is a dictionary mapping user-defined aliases to
2311 'aliases' is a dictionary mapping user-defined aliases to
2312 revsetalias objects.
2312 revsetalias objects.
2313 """
2313 """
2314 if not isinstance(tree, tuple):
2314 if not isinstance(tree, tuple):
2315 # Do not expand raw strings
2315 # Do not expand raw strings
2316 return tree
2316 return tree
2317 alias = _getalias(aliases, tree)
2317 alias = _getalias(aliases, tree)
2318 if alias is not None:
2318 if alias is not None:
2319 if alias.error:
2319 if alias.error:
2320 raise util.Abort(alias.error)
2320 raise util.Abort(alias.error)
2321 if alias in expanding:
2321 if alias in expanding:
2322 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2322 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2323 'detected') % alias.name)
2323 'detected') % alias.name)
2324 expanding.append(alias)
2324 expanding.append(alias)
2325 if alias.name not in cache:
2325 if alias.name not in cache:
2326 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2326 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2327 expanding, cache)
2327 expanding, cache)
2328 result = cache[alias.name]
2328 result = cache[alias.name]
2329 expanding.pop()
2329 expanding.pop()
2330 if alias.args is not None:
2330 if alias.args is not None:
2331 l = getlist(tree[2])
2331 l = getlist(tree[2])
2332 if len(l) != len(alias.args):
2332 if len(l) != len(alias.args):
2333 raise error.ParseError(
2333 raise error.ParseError(
2334 _('invalid number of arguments: %s') % len(l))
2334 _('invalid number of arguments: %s') % len(l))
2335 l = [_expandaliases(aliases, a, [], cache) for a in l]
2335 l = [_expandaliases(aliases, a, [], cache) for a in l]
2336 result = _expandargs(result, dict(zip(alias.args, l)))
2336 result = _expandargs(result, dict(zip(alias.args, l)))
2337 else:
2337 else:
2338 result = tuple(_expandaliases(aliases, t, expanding, cache)
2338 result = tuple(_expandaliases(aliases, t, expanding, cache)
2339 for t in tree)
2339 for t in tree)
2340 return result
2340 return result
2341
2341
2342 def findaliases(ui, tree, showwarning=None):
2342 def findaliases(ui, tree, showwarning=None):
2343 _checkaliasarg(tree)
2343 _checkaliasarg(tree)
2344 aliases = {}
2344 aliases = {}
2345 for k, v in ui.configitems('revsetalias'):
2345 for k, v in ui.configitems('revsetalias'):
2346 alias = revsetalias(k, v)
2346 alias = revsetalias(k, v)
2347 aliases[alias.name] = alias
2347 aliases[alias.name] = alias
2348 tree = _expandaliases(aliases, tree, [], {})
2348 tree = _expandaliases(aliases, tree, [], {})
2349 if showwarning:
2349 if showwarning:
2350 # warn about problematic (but not referenced) aliases
2350 # warn about problematic (but not referenced) aliases
2351 for name, alias in sorted(aliases.iteritems()):
2351 for name, alias in sorted(aliases.iteritems()):
2352 if alias.error and not alias.warned:
2352 if alias.error and not alias.warned:
2353 showwarning(_('warning: %s\n') % (alias.error))
2353 showwarning(_('warning: %s\n') % (alias.error))
2354 alias.warned = True
2354 alias.warned = True
2355 return tree
2355 return tree
2356
2356
2357 def foldconcat(tree):
2357 def foldconcat(tree):
2358 """Fold elements to be concatenated by `##`
2358 """Fold elements to be concatenated by `##`
2359 """
2359 """
2360 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2360 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2361 return tree
2361 return tree
2362 if tree[0] == '_concat':
2362 if tree[0] == '_concat':
2363 pending = [tree]
2363 pending = [tree]
2364 l = []
2364 l = []
2365 while pending:
2365 while pending:
2366 e = pending.pop()
2366 e = pending.pop()
2367 if e[0] == '_concat':
2367 if e[0] == '_concat':
2368 pending.extend(reversed(e[1:]))
2368 pending.extend(reversed(e[1:]))
2369 elif e[0] in ('string', 'symbol'):
2369 elif e[0] in ('string', 'symbol'):
2370 l.append(e[1])
2370 l.append(e[1])
2371 else:
2371 else:
2372 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2372 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2373 raise error.ParseError(msg)
2373 raise error.ParseError(msg)
2374 return ('string', ''.join(l))
2374 return ('string', ''.join(l))
2375 else:
2375 else:
2376 return tuple(foldconcat(t) for t in tree)
2376 return tuple(foldconcat(t) for t in tree)
2377
2377
2378 def parse(spec, lookup=None):
2378 def parse(spec, lookup=None):
2379 p = parser.parser(tokenize, elements)
2379 p = parser.parser(tokenize, elements)
2380 return p.parse(spec, lookup=lookup)
2380 return p.parse(spec, lookup=lookup)
2381
2381
2382 def match(ui, spec, repo=None):
2382 def match(ui, spec, repo=None):
2383 if not spec:
2383 if not spec:
2384 raise error.ParseError(_("empty query"))
2384 raise error.ParseError(_("empty query"))
2385 lookup = None
2385 lookup = None
2386 if repo:
2386 if repo:
2387 lookup = repo.__contains__
2387 lookup = repo.__contains__
2388 tree, pos = parse(spec, lookup)
2388 tree, pos = parse(spec, lookup)
2389 if (pos != len(spec)):
2389 if (pos != len(spec)):
2390 raise error.ParseError(_("invalid token"), pos)
2390 raise error.ParseError(_("invalid token"), pos)
2391 if ui:
2391 if ui:
2392 tree = findaliases(ui, tree, showwarning=ui.warn)
2392 tree = findaliases(ui, tree, showwarning=ui.warn)
2393 tree = foldconcat(tree)
2393 tree = foldconcat(tree)
2394 weight, tree = optimize(tree, True)
2394 weight, tree = optimize(tree, True)
2395 def mfunc(repo, subset):
2395 def mfunc(repo, subset):
2396 if util.safehasattr(subset, 'isascending'):
2396 if util.safehasattr(subset, 'isascending'):
2397 result = getset(repo, subset, tree)
2397 result = getset(repo, subset, tree)
2398 else:
2398 else:
2399 result = getset(repo, baseset(subset), tree)
2399 result = getset(repo, baseset(subset), tree)
2400 return result
2400 return result
2401 return mfunc
2401 return mfunc
2402
2402
2403 def formatspec(expr, *args):
2403 def formatspec(expr, *args):
2404 '''
2404 '''
2405 This is a convenience function for using revsets internally, and
2405 This is a convenience function for using revsets internally, and
2406 escapes arguments appropriately. Aliases are intentionally ignored
2406 escapes arguments appropriately. Aliases are intentionally ignored
2407 so that intended expression behavior isn't accidentally subverted.
2407 so that intended expression behavior isn't accidentally subverted.
2408
2408
2409 Supported arguments:
2409 Supported arguments:
2410
2410
2411 %r = revset expression, parenthesized
2411 %r = revset expression, parenthesized
2412 %d = int(arg), no quoting
2412 %d = int(arg), no quoting
2413 %s = string(arg), escaped and single-quoted
2413 %s = string(arg), escaped and single-quoted
2414 %b = arg.branch(), escaped and single-quoted
2414 %b = arg.branch(), escaped and single-quoted
2415 %n = hex(arg), single-quoted
2415 %n = hex(arg), single-quoted
2416 %% = a literal '%'
2416 %% = a literal '%'
2417
2417
2418 Prefixing the type with 'l' specifies a parenthesized list of that type.
2418 Prefixing the type with 'l' specifies a parenthesized list of that type.
2419
2419
2420 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2420 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2421 '(10 or 11):: and ((this()) or (that()))'
2421 '(10 or 11):: and ((this()) or (that()))'
2422 >>> formatspec('%d:: and not %d::', 10, 20)
2422 >>> formatspec('%d:: and not %d::', 10, 20)
2423 '10:: and not 20::'
2423 '10:: and not 20::'
2424 >>> formatspec('%ld or %ld', [], [1])
2424 >>> formatspec('%ld or %ld', [], [1])
2425 "_list('') or 1"
2425 "_list('') or 1"
2426 >>> formatspec('keyword(%s)', 'foo\\xe9')
2426 >>> formatspec('keyword(%s)', 'foo\\xe9')
2427 "keyword('foo\\\\xe9')"
2427 "keyword('foo\\\\xe9')"
2428 >>> b = lambda: 'default'
2428 >>> b = lambda: 'default'
2429 >>> b.branch = b
2429 >>> b.branch = b
2430 >>> formatspec('branch(%b)', b)
2430 >>> formatspec('branch(%b)', b)
2431 "branch('default')"
2431 "branch('default')"
2432 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2432 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2433 "root(_list('a\\x00b\\x00c\\x00d'))"
2433 "root(_list('a\\x00b\\x00c\\x00d'))"
2434 '''
2434 '''
2435
2435
2436 def quote(s):
2436 def quote(s):
2437 return repr(str(s))
2437 return repr(str(s))
2438
2438
2439 def argtype(c, arg):
2439 def argtype(c, arg):
2440 if c == 'd':
2440 if c == 'd':
2441 return str(int(arg))
2441 return str(int(arg))
2442 elif c == 's':
2442 elif c == 's':
2443 return quote(arg)
2443 return quote(arg)
2444 elif c == 'r':
2444 elif c == 'r':
2445 parse(arg) # make sure syntax errors are confined
2445 parse(arg) # make sure syntax errors are confined
2446 return '(%s)' % arg
2446 return '(%s)' % arg
2447 elif c == 'n':
2447 elif c == 'n':
2448 return quote(node.hex(arg))
2448 return quote(node.hex(arg))
2449 elif c == 'b':
2449 elif c == 'b':
2450 return quote(arg.branch())
2450 return quote(arg.branch())
2451
2451
2452 def listexp(s, t):
2452 def listexp(s, t):
2453 l = len(s)
2453 l = len(s)
2454 if l == 0:
2454 if l == 0:
2455 return "_list('')"
2455 return "_list('')"
2456 elif l == 1:
2456 elif l == 1:
2457 return argtype(t, s[0])
2457 return argtype(t, s[0])
2458 elif t == 'd':
2458 elif t == 'd':
2459 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2459 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2460 elif t == 's':
2460 elif t == 's':
2461 return "_list('%s')" % "\0".join(s)
2461 return "_list('%s')" % "\0".join(s)
2462 elif t == 'n':
2462 elif t == 'n':
2463 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2463 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2464 elif t == 'b':
2464 elif t == 'b':
2465 return "_list('%s')" % "\0".join(a.branch() for a in s)
2465 return "_list('%s')" % "\0".join(a.branch() for a in s)
2466
2466
2467 m = l // 2
2467 m = l // 2
2468 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2468 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2469
2469
2470 ret = ''
2470 ret = ''
2471 pos = 0
2471 pos = 0
2472 arg = 0
2472 arg = 0
2473 while pos < len(expr):
2473 while pos < len(expr):
2474 c = expr[pos]
2474 c = expr[pos]
2475 if c == '%':
2475 if c == '%':
2476 pos += 1
2476 pos += 1
2477 d = expr[pos]
2477 d = expr[pos]
2478 if d == '%':
2478 if d == '%':
2479 ret += d
2479 ret += d
2480 elif d in 'dsnbr':
2480 elif d in 'dsnbr':
2481 ret += argtype(d, args[arg])
2481 ret += argtype(d, args[arg])
2482 arg += 1
2482 arg += 1
2483 elif d == 'l':
2483 elif d == 'l':
2484 # a list of some type
2484 # a list of some type
2485 pos += 1
2485 pos += 1
2486 d = expr[pos]
2486 d = expr[pos]
2487 ret += listexp(list(args[arg]), d)
2487 ret += listexp(list(args[arg]), d)
2488 arg += 1
2488 arg += 1
2489 else:
2489 else:
2490 raise util.Abort('unexpected revspec format character %s' % d)
2490 raise util.Abort('unexpected revspec format character %s' % d)
2491 else:
2491 else:
2492 ret += c
2492 ret += c
2493 pos += 1
2493 pos += 1
2494
2494
2495 return ret
2495 return ret
2496
2496
2497 def prettyformat(tree):
2497 def prettyformat(tree):
2498 def _prettyformat(tree, level, lines):
2498 def _prettyformat(tree, level, lines):
2499 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2499 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2500 lines.append((level, str(tree)))
2500 lines.append((level, str(tree)))
2501 else:
2501 else:
2502 lines.append((level, '(%s' % tree[0]))
2502 lines.append((level, '(%s' % tree[0]))
2503 for s in tree[1:]:
2503 for s in tree[1:]:
2504 _prettyformat(s, level + 1, lines)
2504 _prettyformat(s, level + 1, lines)
2505 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2505 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2506
2506
2507 lines = []
2507 lines = []
2508 _prettyformat(tree, 0, lines)
2508 _prettyformat(tree, 0, lines)
2509 output = '\n'.join((' '*l + s) for l, s in lines)
2509 output = '\n'.join((' '*l + s) for l, s in lines)
2510 return output
2510 return output
2511
2511
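# Editorial sketch (not part of the original source): how prettyformat renders
# a small hand-built parse tree. The tree literal is hypothetical but follows
# the ('op', child, child) node shape used throughout this module; the
# assertions deliberately avoid depending on the exact indentation width.
def _example_prettyformat():
    out = prettyformat(('or', ('symbol', 'a'), ('symbol', 'b')))
    lines = out.splitlines()
    assert lines[0] == '(or'
    assert lines[1].strip() == "('symbol', 'a')"
    assert lines[-1].strip() == "('symbol', 'b'))"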
2512 def depth(tree):
2512 def depth(tree):
2513 if isinstance(tree, tuple):
2513 if isinstance(tree, tuple):
2514 return max(map(depth, tree)) + 1
2514 return max(map(depth, tree)) + 1
2515 else:
2515 else:
2516 return 0
2516 return 0
2517
2517
2518 def funcsused(tree):
2518 def funcsused(tree):
2519 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2519 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2520 return set()
2520 return set()
2521 else:
2521 else:
2522 funcs = set()
2522 funcs = set()
2523 for s in tree[1:]:
2523 for s in tree[1:]:
2524 funcs |= funcsused(s)
2524 funcs |= funcsused(s)
2525 if tree[0] == 'func':
2525 if tree[0] == 'func':
2526 funcs.add(tree[1][1])
2526 funcs.add(tree[1][1])
2527 return funcs
2527 return funcs
2528
2528
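# Editorial sketch (not part of the original source): depth() and funcsused()
# applied to a hand-built tree resembling "heads(default) and tip". The tree
# literal is hypothetical but mirrors the parser's ('func', ('symbol', name),
# args) and ('symbol', name) node shapes used above.
def _example_treeinspection():
    tree = ('and',
            ('func', ('symbol', 'heads'), ('symbol', 'default')),
            ('symbol', 'tip'))
    assert depth(tree) == 3                     # deepest nesting is the func node
    assert funcsused(tree) == set(['heads'])    # only one function name occurs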
2529 class abstractsmartset(object):
2529 class abstractsmartset(object):
2530
2530
2531 def __nonzero__(self):
2531 def __nonzero__(self):
2532 """True if the smartset is not empty"""
2532 """True if the smartset is not empty"""
2533 raise NotImplementedError()
2533 raise NotImplementedError()
2534
2534
2535 def __contains__(self, rev):
2535 def __contains__(self, rev):
2536 """provide fast membership testing"""
2536 """provide fast membership testing"""
2537 raise NotImplementedError()
2537 raise NotImplementedError()
2538
2538
2539 def __iter__(self):
2539 def __iter__(self):
2540 """iterate the set in the order it is supposed to be iterated"""
2540 """iterate the set in the order it is supposed to be iterated"""
2541 raise NotImplementedError()
2541 raise NotImplementedError()
2542
2542
2543 # Attributes containing a function to perform a fast iteration in a given
2543 # Attributes containing a function to perform a fast iteration in a given
2544 # direction. A smartset can have none, one, or both defined.
2544 # direction. A smartset can have none, one, or both defined.
2545 #
2545 #
2546 # Default value is None instead of a function returning None to avoid
2546 # Default value is None instead of a function returning None to avoid
2547 # initializing an iterator just for testing if a fast method exists.
2547 # initializing an iterator just for testing if a fast method exists.
2548 fastasc = None
2548 fastasc = None
2549 fastdesc = None
2549 fastdesc = None
2550
2550
2551 def isascending(self):
2551 def isascending(self):
2552 """True if the set will iterate in ascending order"""
2552 """True if the set will iterate in ascending order"""
2553 raise NotImplementedError()
2553 raise NotImplementedError()
2554
2554
2555 def isdescending(self):
2555 def isdescending(self):
2556 """True if the set will iterate in descending order"""
2556 """True if the set will iterate in descending order"""
2557 raise NotImplementedError()
2557 raise NotImplementedError()
2558
2558
2559 def min(self):
2559 def min(self):
2560 """return the minimum element in the set"""
2560 """return the minimum element in the set"""
2561 if self.fastasc is not None:
2561 if self.fastasc is not None:
2562 for r in self.fastasc():
2562 for r in self.fastasc():
2563 return r
2563 return r
2564 raise ValueError('arg is an empty sequence')
2564 raise ValueError('arg is an empty sequence')
2565 return min(self)
2565 return min(self)
2566
2566
2567 def max(self):
2567 def max(self):
2568 """return the maximum element in the set"""
2568 """return the maximum element in the set"""
2569 if self.fastdesc is not None:
2569 if self.fastdesc is not None:
2570 for r in self.fastdesc():
2570 for r in self.fastdesc():
2571 return r
2571 return r
2572 raise ValueError('arg is an empty sequence')
2572 raise ValueError('arg is an empty sequence')
2573 return max(self)
2573 return max(self)
2574
2574
2575 def first(self):
2575 def first(self):
2576 """return the first element in the set (user iteration perspective)
2576 """return the first element in the set (user iteration perspective)
2577
2577
2578 Return None if the set is empty"""
2578 Return None if the set is empty"""
2579 raise NotImplementedError()
2579 raise NotImplementedError()
2580
2580
2581 def last(self):
2581 def last(self):
2582 """return the last element in the set (user iteration perspective)
2582 """return the last element in the set (user iteration perspective)
2583
2583
2584 Return None if the set is empty"""
2584 Return None if the set is empty"""
2585 raise NotImplementedError()
2585 raise NotImplementedError()
2586
2586
2587 def __len__(self):
2587 def __len__(self):
2588 """return the length of the smartsets
2588 """return the length of the smartsets
2589
2589
2590 This can be expensive on smartsets that could otherwise be lazy."""
2590 This can be expensive on smartsets that could otherwise be lazy."""
2591 raise NotImplementedError()
2591 raise NotImplementedError()
2592
2592
2593 def reverse(self):
2593 def reverse(self):
2594 """reverse the expected iteration order"""
2594 """reverse the expected iteration order"""
2595 raise NotImplementedError()
2595 raise NotImplementedError()
2596
2596
2597 def sort(self, reverse=True):
2597 def sort(self, reverse=True):
2598 """get the set to iterate in an ascending or descending order"""
2598 """get the set to iterate in an ascending or descending order"""
2599 raise NotImplementedError()
2599 raise NotImplementedError()
2600
2600
2601 def __and__(self, other):
2601 def __and__(self, other):
2602 """Returns a new object with the intersection of the two collections.
2602 """Returns a new object with the intersection of the two collections.
2603
2603
2604 This is part of the mandatory API for smartset."""
2604 This is part of the mandatory API for smartset."""
2605 return self.filter(other.__contains__, cache=False)
2605 return self.filter(other.__contains__, cache=False)
2606
2606
2607 def __add__(self, other):
2607 def __add__(self, other):
2608 """Returns a new object with the union of the two collections.
2608 """Returns a new object with the union of the two collections.
2609
2609
2610 This is part of the mandatory API for smartset."""
2610 This is part of the mandatory API for smartset."""
2611 return addset(self, other)
2611 return addset(self, other)
2612
2612
2613 def __sub__(self, other):
2613 def __sub__(self, other):
2614 """Returns a new object with the substraction of the two collections.
2614 """Returns a new object with the substraction of the two collections.
2615
2615
2616 This is part of the mandatory API for smartset."""
2616 This is part of the mandatory API for smartset."""
2617 c = other.__contains__
2617 c = other.__contains__
2618 return self.filter(lambda r: not c(r), cache=False)
2618 return self.filter(lambda r: not c(r), cache=False)
2619
2619
2620 def filter(self, condition, cache=True):
2620 def filter(self, condition, cache=True):
2621 """Returns this smartset filtered by condition as a new smartset.
2621 """Returns this smartset filtered by condition as a new smartset.
2622
2622
2623 `condition` is a callable which takes a revision number and returns a
2623 `condition` is a callable which takes a revision number and returns a
2624 boolean.
2624 boolean.
2625
2625
2626 This is part of the mandatory API for smartset."""
2626 This is part of the mandatory API for smartset."""
2627 # builtin functions cannot be cached, but they do not need to be
2627 # builtin functions cannot be cached, but they do not need to be
2628 if cache and util.safehasattr(condition, 'func_code'):
2628 if cache and util.safehasattr(condition, 'func_code'):
2629 condition = util.cachefunc(condition)
2629 condition = util.cachefunc(condition)
2630 return filteredset(self, condition)
2630 return filteredset(self, condition)
2631
2631
2632 class baseset(abstractsmartset):
2632 class baseset(abstractsmartset):
2633 """Basic data structure that represents a revset and contains the basic
2633 """Basic data structure that represents a revset and contains the basic
2634 operation that it should be able to perform.
2634 operation that it should be able to perform.
2635
2635
2636 Every method in this class should be implemented by any smartset class.
2636 Every method in this class should be implemented by any smartset class.
2637 """
2637 """
2638 def __init__(self, data=()):
2638 def __init__(self, data=()):
2639 if not isinstance(data, list):
2639 if not isinstance(data, list):
2640 data = list(data)
2640 data = list(data)
2641 self._list = data
2641 self._list = data
2642 self._ascending = None
2642 self._ascending = None
2643
2643
2644 @util.propertycache
2644 @util.propertycache
2645 def _set(self):
2645 def _set(self):
2646 return set(self._list)
2646 return set(self._list)
2647
2647
2648 @util.propertycache
2648 @util.propertycache
2649 def _asclist(self):
2649 def _asclist(self):
2650 asclist = self._list[:]
2650 asclist = self._list[:]
2651 asclist.sort()
2651 asclist.sort()
2652 return asclist
2652 return asclist
2653
2653
2654 def __iter__(self):
2654 def __iter__(self):
2655 if self._ascending is None:
2655 if self._ascending is None:
2656 return iter(self._list)
2656 return iter(self._list)
2657 elif self._ascending:
2657 elif self._ascending:
2658 return iter(self._asclist)
2658 return iter(self._asclist)
2659 else:
2659 else:
2660 return reversed(self._asclist)
2660 return reversed(self._asclist)
2661
2661
2662 def fastasc(self):
2662 def fastasc(self):
2663 return iter(self._asclist)
2663 return iter(self._asclist)
2664
2664
2665 def fastdesc(self):
2665 def fastdesc(self):
2666 return reversed(self._asclist)
2666 return reversed(self._asclist)
2667
2667
2668 @util.propertycache
2668 @util.propertycache
2669 def __contains__(self):
2669 def __contains__(self):
2670 return self._set.__contains__
2670 return self._set.__contains__
2671
2671
2672 def __nonzero__(self):
2672 def __nonzero__(self):
2673 return bool(self._list)
2673 return bool(self._list)
2674
2674
2675 def sort(self, reverse=False):
2675 def sort(self, reverse=False):
2676 self._ascending = not bool(reverse)
2676 self._ascending = not bool(reverse)
2677
2677
2678 def reverse(self):
2678 def reverse(self):
2679 if self._ascending is None:
2679 if self._ascending is None:
2680 self._list.reverse()
2680 self._list.reverse()
2681 else:
2681 else:
2682 self._ascending = not self._ascending
2682 self._ascending = not self._ascending
2683
2683
2684 def __len__(self):
2684 def __len__(self):
2685 return len(self._list)
2685 return len(self._list)
2686
2686
2687 def isascending(self):
2687 def isascending(self):
2688 """Returns True if the collection is ascending order, False if not.
2688 """Returns True if the collection is ascending order, False if not.
2689
2689
2690 This is part of the mandatory API for smartset."""
2690 This is part of the mandatory API for smartset."""
2691 if len(self) <= 1:
2691 if len(self) <= 1:
2692 return True
2692 return True
2693 return self._ascending is not None and self._ascending
2693 return self._ascending is not None and self._ascending
2694
2694
2695 def isdescending(self):
2695 def isdescending(self):
2696 """Returns True if the collection is descending order, False if not.
2696 """Returns True if the collection is descending order, False if not.
2697
2697
2698 This is part of the mandatory API for smartset."""
2698 This is part of the mandatory API for smartset."""
2699 if len(self) <= 1:
2699 if len(self) <= 1:
2700 return True
2700 return True
2701 return self._ascending is not None and not self._ascending
2701 return self._ascending is not None and not self._ascending
2702
2702
2703 def first(self):
2703 def first(self):
2704 if self:
2704 if self:
2705 if self._ascending is None:
2705 if self._ascending is None:
2706 return self._list[0]
2706 return self._list[0]
2707 elif self._ascending:
2707 elif self._ascending:
2708 return self._asclist[0]
2708 return self._asclist[0]
2709 else:
2709 else:
2710 return self._asclist[-1]
2710 return self._asclist[-1]
2711 return None
2711 return None
2712
2712
2713 def last(self):
2713 def last(self):
2714 if self:
2714 if self:
2715 if self._ascending is None:
2715 if self._ascending is None:
2716 return self._list[-1]
2716 return self._list[-1]
2717 elif self._ascending:
2717 elif self._ascending:
2718 return self._asclist[-1]
2718 return self._asclist[-1]
2719 else:
2719 else:
2720 return self._asclist[0]
2720 return self._asclist[0]
2721 return None
2721 return None
2722
2722
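# Editorial sketch (not part of the original source): minimal baseset usage,
# relying only on the class defined above. The _example_* name is illustrative.
def _example_baseset():
    s = baseset([3, 1, 2])
    assert list(s) == [3, 1, 2]              # unsorted: keeps construction order
    s.sort()                                 # ascending iteration from now on
    assert list(s) == [1, 2, 3]
    assert 2 in s and 5 not in s             # membership backed by a cached set
    assert s.first() == 1 and s.last() == 3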
2723 class filteredset(abstractsmartset):
2723 class filteredset(abstractsmartset):
2724 """Duck type for baseset class which iterates lazily over the revisions in
2724 """Duck type for baseset class which iterates lazily over the revisions in
2725 the subset and contains a function which tests for membership in the
2725 the subset and contains a function which tests for membership in the
2726 revset
2726 revset
2727 """
2727 """
2728 def __init__(self, subset, condition=lambda x: True):
2728 def __init__(self, subset, condition=lambda x: True):
2729 """
2729 """
2730 condition: a function that decides whether a revision in the subset
2730 condition: a function that decides whether a revision in the subset
2731 belongs to the revset or not.
2731 belongs to the revset or not.
2732 """
2732 """
2733 self._subset = subset
2733 self._subset = subset
2734 self._condition = condition
2734 self._condition = condition
2735 self._cache = {}
2735 self._cache = {}
2736
2736
2737 def __contains__(self, x):
2737 def __contains__(self, x):
2738 c = self._cache
2738 c = self._cache
2739 if x not in c:
2739 if x not in c:
2740 v = c[x] = x in self._subset and self._condition(x)
2740 v = c[x] = x in self._subset and self._condition(x)
2741 return v
2741 return v
2742 return c[x]
2742 return c[x]
2743
2743
2744 def __iter__(self):
2744 def __iter__(self):
2745 return self._iterfilter(self._subset)
2745 return self._iterfilter(self._subset)
2746
2746
2747 def _iterfilter(self, it):
2747 def _iterfilter(self, it):
2748 cond = self._condition
2748 cond = self._condition
2749 for x in it:
2749 for x in it:
2750 if cond(x):
2750 if cond(x):
2751 yield x
2751 yield x
2752
2752
2753 @property
2753 @property
2754 def fastasc(self):
2754 def fastasc(self):
2755 it = self._subset.fastasc
2755 it = self._subset.fastasc
2756 if it is None:
2756 if it is None:
2757 return None
2757 return None
2758 return lambda: self._iterfilter(it())
2758 return lambda: self._iterfilter(it())
2759
2759
2760 @property
2760 @property
2761 def fastdesc(self):
2761 def fastdesc(self):
2762 it = self._subset.fastdesc
2762 it = self._subset.fastdesc
2763 if it is None:
2763 if it is None:
2764 return None
2764 return None
2765 return lambda: self._iterfilter(it())
2765 return lambda: self._iterfilter(it())
2766
2766
2767 def __nonzero__(self):
2767 def __nonzero__(self):
2768 for r in self:
2768 for r in self:
2769 return True
2769 return True
2770 return False
2770 return False
2771
2771
2772 def __len__(self):
2772 def __len__(self):
2773 # Basic implementation to be changed in future patches.
2773 # Basic implementation to be changed in future patches.
2774 l = baseset([r for r in self])
2774 l = baseset([r for r in self])
2775 return len(l)
2775 return len(l)
2776
2776
2777 def sort(self, reverse=False):
2777 def sort(self, reverse=False):
2778 self._subset.sort(reverse=reverse)
2778 self._subset.sort(reverse=reverse)
2779
2779
2780 def reverse(self):
2780 def reverse(self):
2781 self._subset.reverse()
2781 self._subset.reverse()
2782
2782
2783 def isascending(self):
2783 def isascending(self):
2784 return self._subset.isascending()
2784 return self._subset.isascending()
2785
2785
2786 def isdescending(self):
2786 def isdescending(self):
2787 return self._subset.isdescending()
2787 return self._subset.isdescending()
2788
2788
2789 def first(self):
2789 def first(self):
2790 for x in self:
2790 for x in self:
2791 return x
2791 return x
2792 return None
2792 return None
2793
2793
2794 def last(self):
2794 def last(self):
2795 it = None
2795 it = None
2796 if self._subset.isascending():
2796 if self._subset.isascending():
2797 it = self.fastdesc
2797 it = self.fastdesc
2798 elif self._subset.isdescending():
2798 elif self._subset.isdescending():
2799 it = self.fastasc
2799 it = self.fastasc
2800 if it is None:
2800 if it is None:
2801 # slowly consume everything. This needs improvement
2801 # slowly consume everything. This needs improvement
2802 it = lambda: reversed(list(self))
2802 it = lambda: reversed(list(self))
2803 for x in it():
2803 for x in it():
2804 return x
2804 return x
2805 return None
2805 return None
2806
2806
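# Editorial sketch (not part of the original source): filtering a baseset
# lazily with a predicate, using only the classes defined above.
def _example_filteredset():
    evens = filteredset(baseset([1, 2, 3, 4]), lambda r: r % 2 == 0)
    assert list(evens) == [2, 4]             # evaluated lazily on iteration
    assert 4 in evens and 3 not in evens     # membership results are cached
    assert evens.first() == 2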
2807 class addset(abstractsmartset):
2807 class addset(abstractsmartset):
2808 """Represent the addition of two sets
2808 """Represent the addition of two sets
2809
2809
2810 Wrapper structure for lazily adding two structures without losing much
2810 Wrapper structure for lazily adding two structures without losing much
2811 performance on the __contains__ method
2811 performance on the __contains__ method
2812
2812
2813 If the ascending attribute is set, that means the two structures are
2813 If the ascending attribute is set, that means the two structures are
2814 ordered in either an ascending or descending way. Therefore, we can add
2814 ordered in either an ascending or descending way. Therefore, we can add
2815 them maintaining the order by iterating over both at the same time
2815 them maintaining the order by iterating over both at the same time
2816 """
2816 """
2817 def __init__(self, revs1, revs2, ascending=None):
2817 def __init__(self, revs1, revs2, ascending=None):
2818 self._r1 = revs1
2818 self._r1 = revs1
2819 self._r2 = revs2
2819 self._r2 = revs2
2820 self._iter = None
2820 self._iter = None
2821 self._ascending = ascending
2821 self._ascending = ascending
2822 self._genlist = None
2822 self._genlist = None
2823 self._asclist = None
2823 self._asclist = None
2824
2824
2825 def __len__(self):
2825 def __len__(self):
2826 return len(self._list)
2826 return len(self._list)
2827
2827
2828 def __nonzero__(self):
2828 def __nonzero__(self):
2829 return bool(self._r1) or bool(self._r2)
2829 return bool(self._r1) or bool(self._r2)
2830
2830
2831 @util.propertycache
2831 @util.propertycache
2832 def _list(self):
2832 def _list(self):
2833 if not self._genlist:
2833 if not self._genlist:
2834 self._genlist = baseset(self._iterator())
2834 self._genlist = baseset(self._iterator())
2835 return self._genlist
2835 return self._genlist
2836
2836
2837 def _iterator(self):
2837 def _iterator(self):
2838 """Iterate over both collections without repeating elements
2838 """Iterate over both collections without repeating elements
2839
2839
2840 If the ascending attribute is not set, iterate over the first one and
2840 If the ascending attribute is not set, iterate over the first one and
2841 then over the second one checking for membership on the first one so we
2841 then over the second one checking for membership on the first one so we
2842 don't yield any duplicates.
2842 don't yield any duplicates.
2843
2843
2844 If the ascending attribute is set, iterate over both collections at the
2844 If the ascending attribute is set, iterate over both collections at the
2845 same time, yielding only one value at a time in the given order.
2845 same time, yielding only one value at a time in the given order.
2846 """
2846 """
2847 if self._ascending is None:
2847 if self._ascending is None:
2848 def gen():
2848 def gen():
2849 for r in self._r1:
2849 for r in self._r1:
2850 yield r
2850 yield r
2851 inr1 = self._r1.__contains__
2851 inr1 = self._r1.__contains__
2852 for r in self._r2:
2852 for r in self._r2:
2853 if not inr1(r):
2853 if not inr1(r):
2854 yield r
2854 yield r
2855 gen = gen()
2855 gen = gen()
2856 else:
2856 else:
2857 iter1 = iter(self._r1)
2857 iter1 = iter(self._r1)
2858 iter2 = iter(self._r2)
2858 iter2 = iter(self._r2)
2859 gen = self._iterordered(self._ascending, iter1, iter2)
2859 gen = self._iterordered(self._ascending, iter1, iter2)
2860 return gen
2860 return gen
2861
2861
2862 def __iter__(self):
2862 def __iter__(self):
2863 if self._ascending is None:
2863 if self._ascending is None:
2864 if self._genlist:
2864 if self._genlist:
2865 return iter(self._genlist)
2865 return iter(self._genlist)
2866 return iter(self._iterator())
2866 return iter(self._iterator())
2867 self._trysetasclist()
2867 self._trysetasclist()
2868 if self._ascending:
2868 if self._ascending:
2869 it = self.fastasc
2869 it = self.fastasc
2870 else:
2870 else:
2871 it = self.fastdesc
2871 it = self.fastdesc
2872 if it is None:
2872 if it is None:
2873 # consume the gen and try again
2873 # consume the gen and try again
2874 self._list
2874 self._list
2875 return iter(self)
2875 return iter(self)
2876 return it()
2876 return it()
2877
2877
2878 def _trysetasclist(self):
2878 def _trysetasclist(self):
2879 """populate the _asclist attribute if possible and necessary"""
2879 """populate the _asclist attribute if possible and necessary"""
2880 if self._genlist is not None and self._asclist is None:
2880 if self._genlist is not None and self._asclist is None:
2881 self._asclist = sorted(self._genlist)
2881 self._asclist = sorted(self._genlist)
2882
2882
2883 @property
2883 @property
2884 def fastasc(self):
2884 def fastasc(self):
2885 self._trysetasclist()
2885 self._trysetasclist()
2886 if self._asclist is not None:
2886 if self._asclist is not None:
2887 return self._asclist.__iter__
2887 return self._asclist.__iter__
2888 iter1 = self._r1.fastasc
2888 iter1 = self._r1.fastasc
2889 iter2 = self._r2.fastasc
2889 iter2 = self._r2.fastasc
2890 if None in (iter1, iter2):
2890 if None in (iter1, iter2):
2891 return None
2891 return None
2892 return lambda: self._iterordered(True, iter1(), iter2())
2892 return lambda: self._iterordered(True, iter1(), iter2())
2893
2893
2894 @property
2894 @property
2895 def fastdesc(self):
2895 def fastdesc(self):
2896 self._trysetasclist()
2896 self._trysetasclist()
2897 if self._asclist is not None:
2897 if self._asclist is not None:
2898 return self._asclist.__reversed__
2898 return self._asclist.__reversed__
2899 iter1 = self._r1.fastdesc
2899 iter1 = self._r1.fastdesc
2900 iter2 = self._r2.fastdesc
2900 iter2 = self._r2.fastdesc
2901 if None in (iter1, iter2):
2901 if None in (iter1, iter2):
2902 return None
2902 return None
2903 return lambda: self._iterordered(False, iter1(), iter2())
2903 return lambda: self._iterordered(False, iter1(), iter2())
2904
2904
2905 def _iterordered(self, ascending, iter1, iter2):
2905 def _iterordered(self, ascending, iter1, iter2):
2906 """produce an ordered iteration from two iterators with the same order
2906 """produce an ordered iteration from two iterators with the same order
2907
2907
2908 The `ascending` parameter indicates the iteration direction.
2908 The `ascending` parameter indicates the iteration direction.
2909 """
2909 """
2910 choice = max
2910 choice = max
2911 if ascending:
2911 if ascending:
2912 choice = min
2912 choice = min
2913
2913
2914 val1 = None
2914 val1 = None
2915 val2 = None
2915 val2 = None
2916
2916
2920 try:
2920 try:
2921 # Consume both iterators in an ordered way until one is
2921 # Consume both iterators in an ordered way until one is
2922 # empty
2922 # empty
2923 while True:
2923 while True:
2924 if val1 is None:
2924 if val1 is None:
2925 val1 = iter1.next()
2925 val1 = iter1.next()
2926 if val2 is None:
2926 if val2 is None:
2927 val2 = iter2.next()
2927 val2 = iter2.next()
2928 next = choice(val1, val2)
2928 next = choice(val1, val2)
2929 yield next
2929 yield next
2930 if val1 == next:
2930 if val1 == next:
2931 val1 = None
2931 val1 = None
2932 if val2 == next:
2932 if val2 == next:
2933 val2 = None
2933 val2 = None
2934 except StopIteration:
2934 except StopIteration:
2935 # Flush any remaining values and consume the other one
2935 # Flush any remaining values and consume the other one
2936 it = iter2
2936 it = iter2
2937 if val1 is not None:
2937 if val1 is not None:
2938 yield val1
2938 yield val1
2939 it = iter1
2939 it = iter1
2940 elif val2 is not None:
2940 elif val2 is not None:
2941 # might have been equality and both are empty
2941 # might have been equality and both are empty
2942 yield val2
2942 yield val2
2943 for val in it:
2943 for val in it:
2944 yield val
2944 yield val
2945
2945
2946 def __contains__(self, x):
2946 def __contains__(self, x):
2947 return x in self._r1 or x in self._r2
2947 return x in self._r1 or x in self._r2
2948
2948
2949 def sort(self, reverse=False):
2949 def sort(self, reverse=False):
2950 """Sort the added set
2950 """Sort the added set
2951
2951
2952 For this we use the cached list with all the generated values and if we
2952 For this we use the cached list with all the generated values and if we
2953 know they are ascending or descending we can sort them in a smart way.
2953 know they are ascending or descending we can sort them in a smart way.
2954 """
2954 """
2955 self._ascending = not reverse
2955 self._ascending = not reverse
2956
2956
2957 def isascending(self):
2957 def isascending(self):
2958 return self._ascending is not None and self._ascending
2958 return self._ascending is not None and self._ascending
2959
2959
2960 def isdescending(self):
2960 def isdescending(self):
2961 return self._ascending is not None and not self._ascending
2961 return self._ascending is not None and not self._ascending
2962
2962
2963 def reverse(self):
2963 def reverse(self):
2964 if self._ascending is None:
2964 if self._ascending is None:
2965 self._list.reverse()
2965 self._list.reverse()
2966 else:
2966 else:
2967 self._ascending = not self._ascending
2967 self._ascending = not self._ascending
2968
2968
2969 def first(self):
2969 def first(self):
2970 for x in self:
2970 for x in self:
2971 return x
2971 return x
2972 return None
2972 return None
2973
2973
2974 def last(self):
2974 def last(self):
2975 self.reverse()
2975 self.reverse()
2976 val = self.first()
2976 val = self.first()
2977 self.reverse()
2977 self.reverse()
2978 return val
2978 return val
2979
2979
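# Editorial sketch (not part of the original source): merging two ascending
# basesets with addset while preserving order and dropping duplicates.
def _example_addset():
    merged = addset(baseset([0, 2, 4]), baseset([1, 2, 5]), ascending=True)
    assert list(merged) == [0, 1, 2, 4, 5]     # ordered merge; 2 yielded once
    assert 4 in merged and 3 not in merged     # membership checks both sides
    assert merged.first() == 0 and merged.last() == 5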
2980 class generatorset(abstractsmartset):
2980 class generatorset(abstractsmartset):
2981 """Wrap a generator for lazy iteration
2981 """Wrap a generator for lazy iteration
2982
2982
2983 Wrapper structure for generators that provides lazy membership and can
2983 Wrapper structure for generators that provides lazy membership and can
2984 be iterated more than once.
2984 be iterated more than once.
2985 When asked for membership it generates values until either it finds the
2985 When asked for membership it generates values until either it finds the
2986 requested one or has gone through all the elements in the generator
2986 requested one or has gone through all the elements in the generator
2987 """
2987 """
2988 def __init__(self, gen, iterasc=None):
2988 def __init__(self, gen, iterasc=None):
2989 """
2989 """
2990 gen: a generator producing the values for the generatorset.
2990 gen: a generator producing the values for the generatorset.
2991 """
2991 """
2992 self._gen = gen
2992 self._gen = gen
2993 self._asclist = None
2993 self._asclist = None
2994 self._cache = {}
2994 self._cache = {}
2995 self._genlist = []
2995 self._genlist = []
2996 self._finished = False
2996 self._finished = False
2997 self._ascending = True
2997 self._ascending = True
2998 if iterasc is not None:
2998 if iterasc is not None:
2999 if iterasc:
2999 if iterasc:
3000 self.fastasc = self._iterator
3000 self.fastasc = self._iterator
3001 self.__contains__ = self._asccontains
3001 self.__contains__ = self._asccontains
3002 else:
3002 else:
3003 self.fastdesc = self._iterator
3003 self.fastdesc = self._iterator
3004 self.__contains__ = self._desccontains
3004 self.__contains__ = self._desccontains
3005
3005
3006 def __nonzero__(self):
3006 def __nonzero__(self):
3007 for r in self:
3007 for r in self:
3008 return True
3008 return True
3009 return False
3009 return False
3010
3010
3011 def __contains__(self, x):
3011 def __contains__(self, x):
3012 if x in self._cache:
3012 if x in self._cache:
3013 return self._cache[x]
3013 return self._cache[x]
3014
3014
3015 # Use new values only, as existing values would be cached.
3015 # Use new values only, as existing values would be cached.
3016 for l in self._consumegen():
3016 for l in self._consumegen():
3017 if l == x:
3017 if l == x:
3018 return True
3018 return True
3019
3019
3020 self._cache[x] = False
3020 self._cache[x] = False
3021 return False
3021 return False
3022
3022
3023 def _asccontains(self, x):
3023 def _asccontains(self, x):
3024 """version of contains optimised for ascending generator"""
3024 """version of contains optimised for ascending generator"""
3025 if x in self._cache:
3025 if x in self._cache:
3026 return self._cache[x]
3026 return self._cache[x]
3027
3027
3028 # Use new values only, as existing values would be cached.
3028 # Use new values only, as existing values would be cached.
3029 for l in self._consumegen():
3029 for l in self._consumegen():
3030 if l == x:
3030 if l == x:
3031 return True
3031 return True
3032 if l > x:
3032 if l > x:
3033 break
3033 break
3034
3034
3035 self._cache[x] = False
3035 self._cache[x] = False
3036 return False
3036 return False
3037
3037
3038 def _desccontains(self, x):
3038 def _desccontains(self, x):
3039 """version of contains optimised for descending generator"""
3039 """version of contains optimised for descending generator"""
3040 if x in self._cache:
3040 if x in self._cache:
3041 return self._cache[x]
3041 return self._cache[x]
3042
3042
3043 # Use new values only, as existing values would be cached.
3043 # Use new values only, as existing values would be cached.
3044 for l in self._consumegen():
3044 for l in self._consumegen():
3045 if l == x:
3045 if l == x:
3046 return True
3046 return True
3047 if l < x:
3047 if l < x:
3048 break
3048 break
3049
3049
3050 self._cache[x] = False
3050 self._cache[x] = False
3051 return False
3051 return False
3052
3052
3053 def __iter__(self):
3053 def __iter__(self):
3054 if self._ascending:
3054 if self._ascending:
3055 it = self.fastasc
3055 it = self.fastasc
3056 else:
3056 else:
3057 it = self.fastdesc
3057 it = self.fastdesc
3058 if it is not None:
3058 if it is not None:
3059 return it()
3059 return it()
3060 # we need to consume the iterator
3060 # we need to consume the iterator
3061 for x in self._consumegen():
3061 for x in self._consumegen():
3062 pass
3062 pass
3063 # recall the same code
3063 # recall the same code
3064 return iter(self)
3064 return iter(self)
3065
3065
3066 def _iterator(self):
3066 def _iterator(self):
3067 if self._finished:
3067 if self._finished:
3068 return iter(self._genlist)
3068 return iter(self._genlist)
3069
3069
3070 # We have to use this complex iteration strategy to allow multiple
3070 # We have to use this complex iteration strategy to allow multiple
3071 # iterations at the same time. We need to be able to catch revisions
3071 # iterations at the same time. We need to be able to catch revisions
3072 # removed from _consumegen and added to genlist by another instance.
3072 # removed from _consumegen and added to genlist by another instance.
3073 #
3073 #
3074 # Getting rid of it would provide about a 15% speedup on this
3074 # Getting rid of it would provide about a 15% speedup on this
3075 # iteration.
3075 # iteration.
3076 genlist = self._genlist
3076 genlist = self._genlist
3077 nextrev = self._consumegen().next
3077 nextrev = self._consumegen().next
3078 _len = len # cache global lookup
3078 _len = len # cache global lookup
3079 def gen():
3079 def gen():
3080 i = 0
3080 i = 0
3081 while True:
3081 while True:
3082 if i < _len(genlist):
3082 if i < _len(genlist):
3083 yield genlist[i]
3083 yield genlist[i]
3084 else:
3084 else:
3085 yield nextrev()
3085 yield nextrev()
3086 i += 1
3086 i += 1
3087 return gen()
3087 return gen()
3088
3088
3089 def _consumegen(self):
3089 def _consumegen(self):
3090 cache = self._cache
3090 cache = self._cache
3091 genlist = self._genlist.append
3091 genlist = self._genlist.append
3092 for item in self._gen:
3092 for item in self._gen:
3093 cache[item] = True
3093 cache[item] = True
3094 genlist(item)
3094 genlist(item)
3095 yield item
3095 yield item
3096 if not self._finished:
3096 if not self._finished:
3097 self._finished = True
3097 self._finished = True
3098 asc = self._genlist[:]
3098 asc = self._genlist[:]
3099 asc.sort()
3099 asc.sort()
3100 self._asclist = asc
3100 self._asclist = asc
3101 self.fastasc = asc.__iter__
3101 self.fastasc = asc.__iter__
3102 self.fastdesc = asc.__reversed__
3102 self.fastdesc = asc.__reversed__
3103
3103
3104 def __len__(self):
3104 def __len__(self):
3105 for x in self._consumegen():
3105 for x in self._consumegen():
3106 pass
3106 pass
3107 return len(self._genlist)
3107 return len(self._genlist)
3108
3108
3109 def sort(self, reverse=False):
3109 def sort(self, reverse=False):
3110 self._ascending = not reverse
3110 self._ascending = not reverse
3111
3111
3112 def reverse(self):
3112 def reverse(self):
3113 self._ascending = not self._ascending
3113 self._ascending = not self._ascending
3114
3114
3115 def isascending(self):
3115 def isascending(self):
3116 return self._ascending
3116 return self._ascending
3117
3117
3118 def isdescending(self):
3118 def isdescending(self):
3119 return not self._ascending
3119 return not self._ascending
3120
3120
3121 def first(self):
3121 def first(self):
3122 if self._ascending:
3122 if self._ascending:
3123 it = self.fastasc
3123 it = self.fastasc
3124 else:
3124 else:
3125 it = self.fastdesc
3125 it = self.fastdesc
3126 if it is None:
3126 if it is None:
3127 # we need to consume all and try again
3127 # we need to consume all and try again
3128 for x in self._consumegen():
3128 for x in self._consumegen():
3129 pass
3129 pass
3130 return self.first()
3130 return self.first()
3131 if self:
3131 if self:
3132 return it().next()
3132 return it().next()
3133 return None
3133 return None
3134
3134
3135 def last(self):
3135 def last(self):
3136 if self._ascending:
3136 if self._ascending:
3137 it = self.fastdesc
3137 it = self.fastdesc
3138 else:
3138 else:
3139 it = self.fastasc
3139 it = self.fastasc
3140 if it is None:
3140 if it is None:
3141 # we need to consume all and try again
3141 # we need to consume all and try again
3142 for x in self._consumegen():
3142 for x in self._consumegen():
3143 pass
3143 pass
3144 return self.last()
3144 return self.last()
3145 if self:
3145 if self:
3146 return it().next()
3146 return it().next()
3147 return None
3147 return None
3148
3148
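# Editorial sketch (not part of the original source): wrapping a plain
# generator so it can be probed for membership and iterated more than once.
def _example_generatorset():
    gs = generatorset(iter([0, 1, 2, 3]), iterasc=True)
    assert 2 in gs                        # consumes the generator only up to 2
    assert list(gs) == [0, 1, 2, 3]       # remaining values are pulled lazily
    assert gs.first() == 0 and gs.last() == 3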
3149 def spanset(repo, start=None, end=None):
3149 def spanset(repo, start=None, end=None):
3150 """factory function to dispatch between fullreposet and actual spanset
3150 """factory function to dispatch between fullreposet and actual spanset
3151
3151
3152 Feel free to update all spanset call sites and kill this function at some
3152 Feel free to update all spanset call sites and kill this function at some
3153 point.
3153 point.
3154 """
3154 """
3155 if start is None and end is None:
3155 if start is None and end is None:
3156 return fullreposet(repo)
3156 return fullreposet(repo)
3157 return _spanset(repo, start, end)
3157 return _spanset(repo, start, end)
3158
3158
3159
3159
3160 class _spanset(abstractsmartset):
3160 class _spanset(abstractsmartset):
3161 """Duck type for baseset class which represents a range of revisions and
3161 """Duck type for baseset class which represents a range of revisions and
3162 can work lazily and without having all the range in memory
3162 can work lazily and without having all the range in memory
3163
3163
3164 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3164 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3165 notable points:
3165 notable points:
3166 - when x < y it will be automatically descending,
3166 - when x < y it will be automatically descending,
3167 - revision filtered with this repoview will be skipped.
3167 - revision filtered with this repoview will be skipped.
3168
3168
3169 """
3169 """
3170 def __init__(self, repo, start=0, end=None):
3170 def __init__(self, repo, start=0, end=None):
3171 """
3171 """
3172 start: first revision included in the set
3172 start: first revision included in the set
3173 (defaults to 0)
3173 (defaults to 0)
3174 end: first revision excluded (last+1)
3174 end: first revision excluded (last+1)
3175 (defaults to len(repo))
3175 (defaults to len(repo))
3176
3176
3177 Spanset will be descending if `end` < `start`.
3177 Spanset will be descending if `end` < `start`.
3178 """
3178 """
3179 if end is None:
3179 if end is None:
3180 end = len(repo)
3180 end = len(repo)
3181 self._ascending = start <= end
3181 self._ascending = start <= end
3182 if not self._ascending:
3182 if not self._ascending:
3183 start, end = end + 1, start +1
3183 start, end = end + 1, start +1
3184 self._start = start
3184 self._start = start
3185 self._end = end
3185 self._end = end
3186 self._hiddenrevs = repo.changelog.filteredrevs
3186 self._hiddenrevs = repo.changelog.filteredrevs
3187
3187
    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)
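    # Illustrative sketch (editor's addition, not part of upstream revset.py):
    # hidden (filtered) revisions are silently dropped while iterating.
    # Assuming revision 3 is filtered in `repo`:
    #
    #   >>> s = _spanset(repo, 2, 6)
    #   >>> list(s.fastasc())
    #   [2, 4, 5]
    #   >>> list(s.fastdesc())
    #   [5, 4, 2]
    #
    # When nothing is filtered, both fast iterators return a bare xrange
    # iterator, so no per-revision membership test is paid.
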
    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))
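    # Continuing the sketch above (editor's addition): membership combines the
    # numeric bounds with the hidden-revision check.
    #
    #   >>> s = _spanset(repo, 2, 6)    # revision 3 still assumed filtered
    #   >>> 4 in s, 3 in s, 7 in s
    #   (True, False, False)
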
    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count
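    # Illustrative sketch (editor's addition, not part of upstream revset.py):
    # the length is the raw span size minus the filtered revisions that fall
    # inside the span, whatever the direction. Revision 3 still assumed
    # filtered:
    #
    #   >>> len(_spanset(repo, 2, 6))   # {2, 4, 5}
    #   3
    #   >>> len(_spanset(repo, 5, 1))   # same revisions, descending
    #   3
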
    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None
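# Illustrative sketch (editor's addition, not part of upstream revset.py):
# first() and last() follow the current direction, so reversing the set swaps
# them. Assuming the same repo as above, with revision 3 filtered:
#
#   >>> s = _spanset(repo, 2, 6)
#   >>> s.first(), s.last()
#   (2, 5)
#   >>> s.reverse()
#   >>> s.first(), s.last()
#   (5, 2)
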
class fullreposet(_spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimizations.
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, anything in the other set is also
        in self. Therefore `self & other = other`.

        This boldly assumes that `other` contains only valid revs.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        other.sort(reverse=self.isdescending())
        return other
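# Illustrative sketch (editor's addition, not part of upstream revset.py): the
# fullreposet "&" optimization returns the right-hand operand itself, coerced
# into a baseset when needed and re-sorted to match the left-hand direction.
# Assuming `repo` has revisions 0..5 with revision 3 filtered:
#
#   >>> allrevs = fullreposet(repo)
#   >>> result = allrevs & set([1, 3, 4])   # plain set, not a smartset
#   >>> list(result)                        # hidden rev 3 dropped, ascending
#   [1, 4]
#
# The left operand never iterates its own (potentially huge) range here,
# which is the point of hosting the optimization on fullreposet.
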
# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()