##// END OF EJS Templates
revisionbranchcache: fall back to slow path if starting readonly (issue4531)...
Mads Kiilerich -
r24159:5b4ed033 3.3.1 stable
parent child Browse files
Show More
@@ -1,451 +1,455 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10 import util
11 11 import time
12 12 from array import array
13 13 from struct import calcsize, pack, unpack
14 14
15 15 def _filename(repo):
16 16 """name of a branchcache file for a given repo or repoview"""
17 17 filename = "cache/branch2"
18 18 if repo.filtername:
19 19 filename = '%s-%s' % (filename, repo.filtername)
20 20 return filename
21 21
def read(repo):
    """Load the on-disk branch cache for repo.

    Returns a branchcache populated from the cache file, or None when
    the file is missing, stale, or corrupt.
    """
    try:
        f = repo.vfs(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return None

    try:
        # first line is the cache key: "<tip hex> <tip rev> [filtered hash]"
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        cache = branchcache(tipnode=last, tiprev=lrev,
                            filteredhash=filteredhash)
        if not cache.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        # remaining lines: "<head hex> <o|c> <branch name>"
        for line in lines:
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            if node not in repo:
                raise ValueError('node %s does not exist' % node)
            node = bin(node)
            cache.setdefault(label, []).append(node)
            if state == 'c':
                cache._closednodes.add(node)
    except KeyboardInterrupt:
        raise
    except Exception as inst:
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.debug(msg % inst)
        cache = None
    return cache
66 66
### Nearest subset relation
# The nearest subset of filter X is a filter Y such that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
77 77
def updatecache(repo):
    """Bring repo._branchcaches up to date for repo's current filter.

    Reuses the in-memory cache when still valid, otherwise loads the
    on-disk cache; failing that, seeds from the nearest-subset view's
    branchmap (or an empty cache), then replays any missing revisions.
    """
    cl = repo.changelog
    filtername = repo.filtername
    cache = repo._branchcaches.get(filtername)

    revs = []
    if cache is None or not cache.validfor(repo):
        cache = read(repo)
        if cache is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                cache = branchcache()
            else:
                # copy the nearest subset's map and account for the
                # revisions it filtered out but we do not
                subset = repo.filtered(subsetname)
                cache = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= cache.tiprev)
    revs.extend(cl.revs(start=cache.tiprev + 1))
    if revs:
        cache.update(repo, revs)
        cache.write(repo)
    assert cache.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = cache
101 101
class branchcache(dict):
    """A dict-like object caching the heads of each branch.

    Avoids the costly computation of all branch heads of a repo.

    On-disk serialization format:

        <tip hex node> <tip rev number> [optional filtered repo hex hash]
        <branch head hex node> <open/closed state> <branch name>
        <branch head hex node> <open/closed state> <branch name>
        ...

    The first line validates the cache; for a filtered repo view an
    optional third field hashes the hashes of all filtered revisions.

    The open/closed state is a single letter, 'o' or 'c', and lets us
    decide whether a head closes its branch without reading the
    changelog.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # nodes that close their branch; after an update this may also
        # contain nodes that are no longer heads
        self._closednodes = set() if closednodes is None else closednodes
        self._revbranchcache = None

    def _hashfiltered(self, repo):
        """Return a hash of the revisions filtered at or below tiprev.

        tipnode/tiprev alone cannot distinguish caches that ignored
        different revisions below tiprev, so we hash the ignored set.
        Returns None when nothing relevant is filtered.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            return None
        ignored = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if not ignored:
            return None
        hasher = util.sha1()
        for rev in ignored:
            hasher.update('%s;' % rev)
        return hasher.digest()

    def validfor(self, repo):
        """Is the cache content valid regarding a repo?

        - False when the cached tipnode is unknown or a strip is
          detected.
        - True when the cache is up to date or a subset of repo.
        """
        try:
            if self.tipnode != repo.changelog.node(self.tiprev):
                return False
            return self.filteredhash == self._hashfiltered(repo)
        except IndexError:
            return False

    def _branchtip(self, heads):
        """Return (tip, closed).

        tip is the last open head of heads (closed=False) or, if every
        head is closed, the last head (closed=True).
        """
        for h in reversed(heads):
            if h not in self._closednodes:
                return h, False
        return heads[-1], True

    def branchtip(self, branch):
        """Return the tipmost open head on branch, or the tipmost
        closed head when all heads are closed.

        Raises KeyError for an unknown branch.
        """
        return self._branchtip(self[branch])[0]

    def branchheads(self, branch, closed=False):
        """Return the heads of branch, omitting closed heads unless
        closed is True."""
        heads = self[branch]
        if closed:
            return heads
        return [h for h in heads if h not in self._closednodes]

    def iterbranches(self):
        """Yield (branch, heads, tipnode, closed) for each branch."""
        for branch, heads in self.iteritems():
            yield (branch, heads) + self._branchtip(heads)

    def copy(self):
        """Return a deep copy of this branchcache object."""
        return branchcache(self, self.tipnode, self.tiprev,
                           self.filteredhash, self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk; failures are logged and
        swallowed (the cache is merely an optimization)."""
        try:
            f = repo.vfs(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    nodecount += 1
                    state = 'c' if node in self._closednodes else 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self), nodecount)
        except (IOError, OSError, util.Abort) as inst:
            # Abort may be raised by a read-only opener
            repo.ui.debug("couldn't write branch cache: %s\n" % inst)
        if self._revbranchcache:
            self._revbranchcache.write(repo.unfiltered())
            self._revbranchcache = None

    def update(self, repo, revgen):
        """Update self from revgen.

        self may have extra nodes or be missing heads; revgen is a
        generator of revisions that is strictly a superset of the
        missing heads. After this call self is correct.
        """
        starttime = time.time()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        urepo = repo.unfiltered()
        self._revbranchcache = revbranchcache(urepo)
        getbranchinfo = self._revbranchcache.branchinfo
        ucl = urepo.changelog
        for r in revgen:
            branch, closesbranch = getbranchinfo(ucl, r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # If older branchheads are reachable from new ones, they are not
        # really heads. Checking parents alone is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            newheadrevs.sort()
            bheadset.update(newheadrevs)

            # Prune two kinds of heads: old heads superseded by a rev in
            # newheadrevs, and members of newheadrevs that are not heads
            # because an existing head descends from them.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # the cache key is no longer valid; recompute the tip from
            # the surviving heads
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)

        duration = time.time() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername, duration)
297 297
298 298 # Revision branch info cache
299 299
300 300 _rbcversion = '-v1'
301 301 _rbcnames = 'cache/rbc-names' + _rbcversion
302 302 _rbcrevs = 'cache/rbc-revs' + _rbcversion
303 303 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
304 304 _rbcrecfmt = '>4sI'
305 305 _rbcrecsize = calcsize(_rbcrecfmt)
306 306 _rbcnodelen = 4
307 307 _rbcbranchidxmask = 0x7fffffff
308 308 _rbccloseflag = 0x80000000
309 309
class revbranchcache(object):
    """Persistent cache mapping revision number to (branch name, close).
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated
    by 0. rbc-names is append-only; each branch name is stored once and
    thus has a unique index.

    Per-revision branch info lives in rbc-revs as fixed-size records.
    The whole file is read into memory but only 'parsed' on demand. The
    file is usually append-only but is truncated when repo modification
    is detected. Each record holds the first 4 bytes of the revision's
    node hash (the record is only trusted when it matches), plus 4
    bytes whose low 31 bits index the branch name and whose top bit
    marks a branch-closing commit. Even a completely trashed rbc-revs
    file will thus still give the right result while converging towards
    full recovery ... assuming no incorrectly matching node hashes.
    rbc-revs grows with 00changelog.i but stays 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._names = []  # branch names in local encoding, static index
        self._rbcrevs = array('c')  # structs of type _rbcrecfmt
        self._rbcsnameslen = 0
        try:
            bndata = repo.vfs.read(_rbcnames)
            # remember the on-disk length for verification before writing
            self._rbcsnameslen = len(bndata)
            self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
        except (IOError, OSError) as inst:
            repo.ui.debug("couldn't read revision branch cache names: %s\n" %
                          inst)
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.vfs.read(_rbcrevs)
                self._rbcrevs.fromstring(data)
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              inst)
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names)  # number of good names on disk
        self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, changelog, rev):
        """Return branch name and close flag for rev, using and
        updating the persistent cache."""
        recidx = rev * _rbcrecsize

        # if the requested rev is missing, add and populate all missing revs
        if len(self._rbcrevs) < recidx + _rbcrecsize:
            first = len(self._rbcrevs) // _rbcrecsize
            self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
                                         len(self._rbcrevs)))
            for r in xrange(first, len(changelog)):
                self._branchinfo(changelog, r)

        # fast path: trust the cached record only if its node prefix matches
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack(
            _rbcrecfmt, buffer(self._rbcrevs, recidx, _rbcrecsize))
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == reponode:
            return self._names[branchidx], close
        # fall back to slow path and make sure it will be written to disk
        self._rbcrevslen = min(self._rbcrevslen, rev)
        return self._branchinfo(changelog, rev)

    def _branchinfo(self, changelog, rev):
        """Retrieve branch info from changelog and update _rbcrevs."""
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: assign the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        recidx = rev * _rbcrecsize
        rec = array('c')
        rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
        self._rbcrevs[recidx:recidx + _rbcrecsize] = rec
        return b, close

    def write(self, repo):
        """Save branch cache if it is dirty."""
        if self._rbcnamescount < len(self._names):
            try:
                if self._rbcnamescount != 0:
                    f = repo.vfs.open(_rbcnames, 'ab')
                    # The position after open(x, 'a') is implementation
                    # defined - see issue3543.  SEEK_END was added in 2.5
                    f.seek(0, 2)  # os.SEEK_END
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # the file changed under us: rewrite from scratch
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" %
                                      _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    f = repo.vfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
            except (IOError, OSError, util.Abort) as inst:
                repo.ui.debug("couldn't write revision branch cache names: "
                              "%s\n" % inst)
                return
            self._rbcnamescount = len(self._names)

        start = self._rbcrevslen * _rbcrecsize
        if start != len(self._rbcrevs):
            revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
            try:
                f = repo.vfs.open(_rbcrevs, 'ab')
                # The position after open(x, 'a') is implementation
                # defined - see issue3543.  SEEK_END was added in 2.5
                f.seek(0, 2)  # os.SEEK_END
                if f.tell() != start:
                    repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
                    f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
            except (IOError, OSError, util.Abort) as inst:
                repo.ui.debug("couldn't write revision branch cache: %s\n" %
                              inst)
                return
            self._rbcrevslen = revs
@@ -1,3292 +1,3292 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    cut = 1 if followfirst else None
    cl = repo.changelog

    def iterate():
        # max-heap of pending revisions, negated for heapq's min order
        h = []
        revs.sort(reverse=True)
        revqueue = util.deque(revs)
        revsnode = None
        if revqueue:
            revsnode = revqueue.popleft()
            heapq.heappush(h, -revsnode)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current not in seen:
                if revsnode and current == revsnode:
                    if revqueue:
                        revsnode = revqueue.popleft()
                        heapq.heappush(h, -revsnode)
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
49 49
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    cut = 1 if followfirst else None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
            return
        seen = set(revs)
        for i in cl.revs(first + 1):
            for p in cl.parentrevs(i)[:cut]:
                if p != nullrev and p in seen:
                    seen.add(i)
                    yield i
                    break

    return generatorset(iterate(), iterasc=True)
73 73
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both
    endpoint sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    stack = list(heads)
    reachable = set()
    seen = {}
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while stack:
        rev = stack.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                stack.append(parent)
    if not reachable:
        return baseset()
    # walk upward in rev order, marking revs with a reachable parent
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
103 103
# parser table: token -> (binding strength, prefix action, infix
# action[, suffix action])
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}
129 129
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in map(chr, xrange(256))
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in map(chr, xrange(256))
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
139 139
140 140 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
141 141 '''
142 142 Parse a revset statement into a stream of tokens
143 143
144 144 ``syminitletters`` is the set of valid characters for the initial
145 145 letter of symbols.
146 146
147 147 By default, character ``c`` is recognized as valid for initial
148 148 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
149 149
150 150 ``symletters`` is the set of valid characters for non-initial
151 151 letters of symbols.
152 152
153 153 By default, character ``c`` is recognized as valid for non-initial
154 154 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
155 155
156 156 Check that @ is a valid unquoted token character (issue3686):
157 157 >>> list(tokenize("@::"))
158 158 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
159 159
160 160 '''
# fall back to the module-level default character sets when the caller
# does not override them
161 161 if syminitletters is None:
162 162 syminitletters = _syminitletters
163 163 if symletters is None:
164 164 symletters = _symletters
165 165
# single pass over the program; each branch yields (type, value, start)
# tuples and advances pos past the token it consumed
166 166 pos, l = 0, len(program)
167 167 while pos < l:
168 168 c = program[pos]
169 169 if c.isspace(): # skip inter-token whitespace
170 170 pass
171 171 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
172 172 yield ('::', None, pos)
173 173 pos += 1 # skip ahead
174 174 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
175 175 yield ('..', None, pos)
176 176 pos += 1 # skip ahead
177 177 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
178 178 yield ('##', None, pos)
179 179 pos += 1 # skip ahead
180 180 elif c in "():,-|&+!~^%": # handle simple operators
181 181 yield (c, None, pos)
# a string literal: an optional leading r means "raw" (no escape
# decoding); otherwise backslash escapes are decoded
182 182 elif (c in '"\'' or c == 'r' and
183 183 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
184 184 if c == 'r':
185 185 pos += 1
186 186 c = program[pos]
187 187 decode = lambda x: x
188 188 else:
189 189 decode = lambda x: x.decode('string-escape')
190 190 pos += 1
191 191 s = pos
192 192 while pos < l: # find closing quote
193 193 d = program[pos]
194 194 if d == '\\': # skip over escaped characters
195 195 pos += 2
196 196 continue
197 197 if d == c:
198 198 yield ('string', decode(program[s:pos]), s)
199 199 break
200 200 pos += 1
201 201 else:
202 202 raise error.ParseError(_("unterminated string"), s)
203 203 # gather up a symbol/keyword
204 204 elif c in syminitletters:
205 205 s = pos
206 206 pos += 1
207 207 while pos < l: # find end of symbol
208 208 d = program[pos]
209 209 if d not in symletters:
210 210 break
211 211 if d == '.' and program[pos - 1] == '.': # special case for ..
212 212 pos -= 1
213 213 break
214 214 pos += 1
215 215 sym = program[s:pos]
216 216 if sym in keywords: # operator keywords
217 217 yield (sym, None, s)
218 218 elif '-' in sym:
219 219 # some jerk gave us foo-bar-baz, try to check if it's a symbol
220 220 if lookup and lookup(sym):
221 221 # looks like a real symbol
222 222 yield ('symbol', sym, s)
223 223 else:
224 224 # looks like an expression
225 225 parts = sym.split('-')
226 226 for p in parts[:-1]:
227 227 if p: # possible consecutive -
228 228 yield ('symbol', p, s)
229 229 s += len(p)
230 230 yield ('-', None, pos)
231 231 s += 1
232 232 if parts[-1]: # possible trailing -
233 233 yield ('symbol', parts[-1], s)
234 234 else:
235 235 yield ('symbol', sym, s)
# the symbol scan stopped one past its end; step back so the shared
# pos += 1 below lands on the terminator
236 236 pos -= 1
237 237 else:
238 238 raise error.ParseError(_("syntax error"), pos)
239 239 pos += 1
240 240 yield ('end', None, pos)
241 241
def parseerrordetail(inst):
    """Compose an error message from the specified ParseError object."""
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
249 249
250 250 # helpers
251 251
def getstring(x, err):
    """Return the value of a 'string' or 'symbol' node; otherwise raise
    ParseError(err)."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
256 256
def getlist(x):
    """Flatten a (possibly nested) 'list' parse node into a Python
    list of its element nodes."""
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return getlist(x[1]) + [x[2]]
263 263
def getargs(x, min, max, err):
    """Return the flattened argument list of x, requiring that its
    length is at least min and (when max >= 0) at most max; raise
    ParseError(err) otherwise."""
    args = getlist(x)
    if len(args) < min or (max >= 0 and len(args) > max):
        raise error.ParseError(err)
    return args
269 269
def isvalidsymbol(tree):
    """Report whether ``tree`` is a well-formed ``symbol`` node."""
    return tree[0] == 'symbol' and len(tree) > 1
274 274
def getsymbol(tree):
    """Return the symbol name from a valid ``symbol`` node.

    Assumes ``tree`` was already vetted by ``isvalidsymbol``.
    """
    return tree[1]
281 281
def isvalidfunc(tree):
    """Report whether ``tree`` is a well-formed ``func`` node."""
    return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
286 286
def getfuncname(tree):
    """Return the function name from a valid ``func`` node.

    Assumes ``tree`` was already vetted by ``isvalidfunc``.
    """
    return getsymbol(tree[1])
293 293
def getfuncargs(tree):
    """Return the argument list of a valid ``func`` node (empty when
    the function was called with no arguments).

    Assumes ``tree`` was already vetted by ``isvalidfunc``.
    """
    if len(tree) > 2:
        return getlist(tree[2])
    return []
303 303
def getset(repo, subset, x):
    """Evaluate parse tree x against subset, returning a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    result = methods[x[0]](repo, subset, *x[1:])
    # already a smartset? pass it through; otherwise wrap it
    if util.safehasattr(result, 'isascending'):
        return result
    return baseset(result)
311 311
def _getrevsource(repo, r):
    """Return the source revision recorded in r's extra data, or None."""
    extra = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key in extra:
            try:
                return repo[extra[key]].rev()
            except error.RepoLookupError:
                # stale source pointer; try the next key
                pass
    return None
321 321
322 322 # operator methods
323 323
def stringset(repo, subset, x):
    """Resolve a single revision identifier against subset."""
    rev = repo[x].rev()
    if rev == -1 and len(subset) == len(repo):
        return baseset([-1])
    if rev in subset:
        return baseset([rev])
    return baseset()
331 331
def symbolset(repo, subset, x):
    """Resolve a bare symbol; predicate names are not valid here."""
    if x in symbols:
        raise error.ParseError(_("can't use %s here") % x)
    return stringset(repo, subset, x)
336 336
def rangeset(repo, subset, x, y):
    """Evaluate the x:y range operator within subset."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    # a descending range when the left endpoint is the larger rev
    if m < n:
        span = spanset(repo, m, n + 1)
    else:
        span = spanset(repo, m, n - 1)
    return span & subset
350 350
def dagrange(repo, subset, x, y):
    """Evaluate the x::y DAG range operator within subset."""
    full = spanset(repo)
    between = _revsbetween(repo, getset(repo, full, x),
                           getset(repo, full, y))
    return between & subset
355 355
def andset(repo, subset, x, y):
    # intersect by evaluating y within the result of x
    left = getset(repo, subset, x)
    return getset(repo, left, y)
358 358
def orset(repo, subset, x, y):
    # evaluate y only against what x did not already match
    left = getset(repo, subset, x)
    right = getset(repo, subset - left, y)
    return left + right
363 363
def notset(repo, subset, x):
    # complement of x within subset
    return subset - getset(repo, subset, x)
366 366
def listset(repo, subset, a, b):
    # a bare list node reaching evaluation is always a user error
    raise error.ParseError(_("can't use a list in this context"))
369 369
def func(repo, subset, a, b):
    """Dispatch a function call node to its registered predicate."""
    if a[0] != 'symbol' or a[1] not in symbols:
        raise error.ParseError(_("not a function: %s") % a[1])
    return symbols[a[1]](repo, subset, b)
374 374
375 375 # functions
376 376
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status field 1 == added files
    return checkstatus(repo, subset, pattern, 1)
388 388
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    full = spanset(repo)
    anc = None

    # fold the GCA pairwise over every rev of every argument set
    for revs in (getset(repo, full, arg) for arg in args):
        for r in revs:
            anc = repo[r] if anc is None else anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
413 413
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation of ancestors()/_firstancestors()."""
    heads = getset(repo, spanset(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
420 420
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
426 426
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
431 431
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain; xrange (not range)
        # avoids materializing a list per rev, matching the rest of
        # this module
        for i in xrange(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
448 448
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    # compare case-insensitively against each changeset's user field
    return subset.filter(lambda r: matcher(encoding.lower(repo[r].user())))
457 457
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get yields the revisions currently in the requested state
    return subset & set(hbisect.get(repo, status))
474 474
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # legacy alias kept so old queries keep working; delegates to bisect()
    return bisect(repo, subset, x)
479 479
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            # explicit None check: a bookmark node is a nonempty binary
            # string and never falsy, so test for the missing case directly
            if bmrev is None:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # drop the null revision in case a bookmark points at it
    bms -= set([node.nullrev])
    return subset & bms
517 517
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    import branchmap
    urepo = repo.unfiltered()
    ucl = urepo.changelog
    # readonly=True: fall back to the slow path rather than rewriting the
    # rev-branch cache from a read-only context (issue4531)
    getbi = branchmap.revbranchcache(urepo, readonly=True).branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
        else:
            # regex / other kinds: match every rev's branch name
            return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))

    # revspec case: select revs in the argument set or on its branches
    s = getset(repo, spanset(repo), x)
    b = set()
    for r in s:
        b.add(getbi(ucl, r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
553 553
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolescence machinery maintains the 'bumped' rev set
    return subset & obsmod.getrevs(repo, 'bumped')
564 564
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo changelogs carry a bundlerevs attribute
        revs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & revs
576 576
def checkstatus(repo, subset, pat, field):
    """Helper for adds()/modifies()/removes()-style predicates.

    Filters subset to changesets whose status list number ``field``
    (as indexed into repo.status()'s result) contains a file matching
    ``pat``.
    """
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher across revisions; fileset ('set:') patterns depend
    # on the changectx and must be rebuilt for every revision
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # the pattern names exactly one literal file: cheap pre-checks
            # against c.files() can avoid the status call entirely
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no touched file matches: cannot be in any status field
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
608 608
def _children(repo, narrow, parentset):
    # children of parentset, restricted to the revisions in narrow
    if not parentset:
        return baseset(set())
    found = set()
    getparents = repo.changelog.parentrevs
    floor = min(parentset)
    for r in narrow:
        # a child always has a higher revision number than its parents
        if r <= floor:
            continue
        for p in getparents(r):
            if p in parentset:
                found.add(r)
    return baseset(found)
622 622
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parentrevs = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parentrevs)
630 630
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    def isclosed(r):
        return repo[r].closesbranch()
    return subset.filter(isclosed)
638 638
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test suffices
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return pats in repo[rev]
        # patterned lookup: scan the whole manifest of the revision
        c = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        for f in c.manifest():
            if m(f):
                return True
        return False

    return subset.filter(matches)
665 665
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best
    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def matches(r):
        # convert records the original identifier in the changeset extras
        src = repo[r].extra().get('convert_revision', None)
        if src is None:
            return False
        return rev is None or src.startswith(rev)

    return subset.filter(matches)
687 687
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    # repo[r].date() is (unixtime, tzoffset); match on the timestamp part
    return subset.filter(lambda r: matchfn(repo[r].date()[0]))
696 696
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    # case-insensitive substring search over each description
    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
709 709
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants()/_firstdescendants().
    roots = getset(repo, spanset(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again so the result keeps
        # subset's own iteration order
        result = subset & result
    return result
728 728
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # follow both parents of every revision
    return _descendants(repo, subset, x, followfirst=False)
734 734
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # Internal helper; exposed through the revset alias machinery only.
    return _descendants(repo, subset, x, followfirst=True)
739 739
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, spanset(repo), x)
    else:
        sources = getall(repo, spanset(repo), x)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # note: lineage collects destination candidates (r as it walks
            # back), not the sources themselves
            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
783 783
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolescence machinery maintains the 'divergent' rev set
    return subset & obsmod.getrevs(repo, 'divergent')
792 792
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    # cache=False: phases can move between evaluations, so do not memoize
    return subset.filter(lambda r: phase(repo, r) == phases.draft,
                         cache=False)
802 802
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolescence machinery maintains the 'extinct' rev set
    return subset & obsmod.getrevs(repo, 'extinct')
811 811
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    args = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(args[0], _('first argument to extra must be a string'))
    value = None

    if len(args) > 1:
        # i18n: "extra" is a keyword
        value = getstring(args[1],
                          _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def matches(r):
        extras = repo[r].extra()
        if label not in extras:
            return False
        # no value argument means "label present" is enough
        return value is None or matcher(extras[label])

    return subset.filter(matches)
838 838
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single filelog to visit
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {}  # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = []     # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr:  # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
953 953
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # thin alias; all argument parsing happens in limit()
    return limit(repo, subset, x)
959 959
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow()/_followfirst(); ``name`` is only
    # used for error messages.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            # filename given: ancestors of that file's history
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            # file not present in the working copy parent: nothing to follow
            return baseset()
    else:
        # no filename: ancestors of the working copy's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
976 976
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # delegates to _follow; 'follow' names the predicate in error messages
    return _follow(repo, subset, x, 'follow')
984 984
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
990 990
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # the subset passed in is already the universe being filtered
    return subset
998 998
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error, e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search files, user and description of each changeset
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches)
1019 1019
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument carries a two-character prefix selecting its role
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches if any of its touched files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1076 1076
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single plain ('p:') pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1089 1089
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    headrevs = set()
    # collect the head revisions of every named branch
    for branchname, nodes in repo.branchmap().iteritems():
        headrevs.update(repo[h].rev() for h in nodes)
    return baseset(headrevs).filter(subset.__contains__)
1100 1100
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # any member that is a parent of another member is not a head
    withchildren = parents(repo, subset, x)
    return members - withchildren
1108 1108
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1117 1117
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        fields = c.files() + [c.user(), c.description()]
        return util.any(needle in encoding.lower(t) for t in fields)

    return subset.filter(matches)
1132 1132
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, spanset(repo), l[0])
    result = []
    it = iter(os)
    # pull at most lim elements from the argument set, keeping only those
    # that are also in the subset being filtered
    for x in xrange(lim):
        try:
            y = it.next()
            if y in ss:
                result.append(y)
        except (StopIteration):
            # argument set exhausted before lim elements were seen
            break
    return baseset(result)
1159 1159
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, spanset(repo), l[0])
    # walk the argument set from the end so the last members come first
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        try:
            y = it.next()
            if y in ss:
                result.append(y)
        except (StopIteration):
            # argument set exhausted before lim elements were seen
            break
    return baseset(result)
1187 1187
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, spanset(repo), x)
    if candidates:
        top = candidates.max()
        # only report it if the subset being filtered contains it
        if top in subset:
            return baseset([top])
    return baseset()
1198 1198
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1207 1207
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    baserev = min(subset)
    # count, for every rev >= baserev, how many children it has; children
    # always have higher revs, so scanning from baserev + 1 is sufficient
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1224 1224
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, spanset(repo), x)
    if candidates:
        low = candidates.min()
        # only report it if the subset being filtered contains it
        if low in subset:
            return baseset([low])
    return baseset()
1235 1235
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field 0 holds the modified files
    return checkstatus(repo, subset, pattern, 0)
1247 1247
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # regex / other kinds: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather all revisions bound to a non-deprecated name in the selected
    # namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1285 1285
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full-length hex node: direct lookup
        rn = repo[n].rev()
    else:
        # prefix: resolve through the changelog's partial matcher
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1306 1306
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsolescence machinery maintains the 'obsolete' rev set
    return subset & obsmod.getrevs(repo, 'obsolete')
1314 1314
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, spanset(repo), args[0])
    if len(args) == 2:
        # explicit second set: exclude its ancestors
        exclude = getset(repo, spanset(repo), args[1])
    else:
        if not include:
            return baseset()
        # implicit form: exclude every repo head that is neither in the
        # first set nor a descendant of it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [h for h in cl.headrevs()
                   if h not in descendants and h not in include]
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1338 1338
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = getall(repo, spanset(repo), x)
    else:
        dests = getset(repo, spanset(repo), x)

    def _firstsrc(rev):
        # follow the recorded source chain back to its first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    sources = set(_firstsrc(r) for r in dests)
    # revisions without a recorded source yield None; drop it
    sources.discard(None)
    return subset & sources
1367 1367
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured push path, then the pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so discovery chatter does not leak to the user
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    # map the missing nodes back to local revision numbers
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1390 1390
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    parentrevs = set(cl.parentrevs(r)[0]
                     for r in getset(repo, spanset(repo), x))
    parentrevs.discard(node.nullrev)
    return subset & parentrevs
1407 1407
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        wdirparents = repo[x].parents()
        try:
            p = wdirparents[1].rev()
        except IndexError:
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    parentrevs = set(cl.parentrevs(r)[1]
                     for r in getset(repo, spanset(repo), x))
    parentrevs.discard(node.nullrev)
    return subset & parentrevs
1428 1428
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        cl = repo.changelog
        ps = set()
        for r in getset(repo, spanset(repo), x):
            # parentrevs yields both parents of the revision
            ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1442 1442
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    cl = repo.changelog
    result = set()
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            result.add(r)
        elif n == 1:
            result.add(cl.parentrevs(r)[0])
        else:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                result.add(parents[1])
    return subset & result
1467 1467
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # deliberately swallow the lookup failure: an unknown revision
        # yields an empty result instead of aborting the whole query
        return baseset()
1481 1481
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    def ispublic(r):
        return phase(repo, r) == phases.public
    # phases may change between evaluations, so do not cache the filter
    return subset.filter(ispublic, cache=False)
1491 1491
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' resolves to the name of the current local branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote peer to resolve the identifier to a node
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1526 1526
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field index 2 selects removed files (see checkstatus)
    return checkstatus(repo, subset, pat, 2)
1538 1538
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is outside the repo's span
    if r != node.nullrev and r not in fullreposet(repo):
        return baseset()
    return subset & baseset([r])
1554 1554
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # stop at the first mismatch: the remaining fields
                    # are sorted to be increasingly expensive to compute
                    # (e.g. 'diff'), so there is no point evaluating them
                    break
            if match:
                return True
        return False

    return subset.filter(matches)
1666 1666
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # in-place reversal of the evaluated set
    result = getset(repo, subset, x)
    result.reverse()
    return result
1674 1674
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    revs = getset(repo, spanset(repo), x)
    # restrict to revisions that are also in the input subset
    subset = baseset([r for r in revs if r in subset])
    # drop every revision that has a parent inside the set
    return subset - _children(repo, subset, revs)
1683 1683
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    phase = repo._phasecache.phase
    def issecret(r):
        return phase(repo, r) == phases.secret
    # phases may change between evaluations, so do not cache the filter
    return subset.filter(issecret, cache=False)
1693 1693
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # byte-wise complement: reverses lexicographic order for '-' keys
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        # fast path: plain revision-number sort
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a sortable key vector per revision
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # the revision number goes last: it breaks ties and is the value
        # recovered below via e[-1]
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1756 1756
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        # NOTE(review): "except E, e" is Python 2-only syntax; would need
        # "except E as e" for a Python 3 port
        except re.error, e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no prefix (or 'literal:' stripped above): exact string equality
    return 'literal', pattern, pattern.__eq__
1795 1795
def _substringmatcher(pattern):
    # like _stringmatcher, but a 'literal' pattern matches as a substring
    # rather than requiring full-string equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher
1801 1801
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no argument: every tagged revision, except the implicit 'tip'
        s = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & s
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        s = set([repo[tn].rev()])
    else:
        s = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & s
1830 1830
def tagged(repo, subset, x):
    # ``tagged()`` is an alias of ``tag()``
    return tag(repo, subset, x)
1833 1833
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # restrict the input set to revisions flagged unstable
    return subset & obsmod.getrevs(repo, 'unstable')
1842 1842
1843 1843
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # ``user`` is an alias of ``author``; delegate to that implementation
    return author(repo, subset, x)
1853 1853
# for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    revs = [repo[r].rev() for r in raw.split('\0')]
    return baseset([r for r in revs if r in subset])
1862 1862
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revision numbers
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    revs = [int(r) for r in raw.split('\0')]
    return baseset([r for r in revs if r in subset])
1871 1871
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of binary-hex node ids
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    cl = repo.changelog
    revs = [cl.rev(node.bin(r)) for r in raw.split('\0')]
    return baseset([r for r in revs if r in subset])
1881 1881
# revset predicate name -> implementation function; names starting with
# an underscore are internal-only and not exposed to users
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
1953 1953
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# (every entry must also be a key of the symbols table above)
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "_list",
    "_intlist",
    "_hexlist",
])
2025 2025
# parse tree node type -> evaluation function
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
    "only": only,
    "onlypost": only,
}
2042 2042
def optimize(x, small):
    """Rewrite parse tree x into a cheaper equivalent form.

    Returns a (weight, tree) pair: weight is a heuristic evaluation cost
    estimate, used to reorder 'and'/'or' operands so cheaper sides are
    evaluated first. 'small' hints that the expression is expected to
    narrow the result, making some operations relatively cheaper.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is evaluated as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'dagrangepre':
        # '::x' -> ancestors(x)
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # 'x::' -> descendants(x)
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        # ':x' -> '0:x'
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        # 'x:' -> 'x:tip'
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; drop the node
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # weight functions by how expensive they typically are to evaluate
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2138 2138
2139 2139 _aliasarg = ('func', ('symbol', '_aliasarg'))
2140 2140 def _getaliasarg(tree):
2141 2141 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2142 2142 return X, None otherwise.
2143 2143 """
2144 2144 if (len(tree) == 3 and tree[:2] == _aliasarg
2145 2145 and tree[2][0] == 'string'):
2146 2146 return tree[2][1]
2147 2147 return None
2148 2148
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        # leaves (strings, etc.) cannot carry an _aliasarg construct
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.ParseError(_("not a function: %s") % '_aliasarg')
    # recurse into every child of the tuple
    for subtree in tree:
        _checkaliasarg(subtree, known)
2159 2159
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@$', plus any byte > 127 to allow non-ASCII names)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2164 2164
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # reuse the regular revset tokenizer, only widening the set of
    # characters allowed to start a symbol
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2174 2174
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    aliasparser = parser.parser(_tokenizealias, elements)
    try:
        tree, pos = aliasparser.parse(decl)
        if pos != len(decl):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for argtree in getfuncargs(tree):
                # each declared argument must be a plain symbol
                if not isvalidsymbol(argtree):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(argtree))
            # reject declarations like "foo($1, $1)"
            if len(set(args)) != len(args):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2245 2245
class revsetalias(object):
    # flag recording whether `self.error` was already reported; this
    # avoids showing the same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        if self.args:
            # Wrap every argument name appearing in the definition in an
            # _aliasarg() marker; _aliasarg is an unknown symbol used only
            # to separate alias argument placeholders from regular strings.
            for arg in self.args:
                value = value.replace(arg, '_aliasarg(%r)' % (arg,))

        try:
            self.replacement, pos = parse(value)
            if pos != len(value):
                raise error.ParseError(_('invalid token'), pos)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2278 2278
2279 2279 def _getalias(aliases, tree):
2280 2280 """If tree looks like an unexpanded alias, return it. Return None
2281 2281 otherwise.
2282 2282 """
2283 2283 if isinstance(tree, tuple) and tree:
2284 2284 if tree[0] == 'symbol' and len(tree) == 2:
2285 2285 name = tree[1]
2286 2286 alias = aliases.get(name)
2287 2287 if alias and alias.args is None and alias.tree == tree:
2288 2288 return alias
2289 2289 if tree[0] == 'func' and len(tree) > 1:
2290 2290 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2291 2291 name = tree[1][1]
2292 2292 alias = aliases.get(name)
2293 2293 if alias and alias.args is not None and alias.tree == tree[:2]:
2294 2294 return alias
2295 2295 return None
2296 2296
2297 2297 def _expandargs(tree, args):
2298 2298 """Replace _aliasarg instances with the substitution value of the
2299 2299 same name in args, recursively.
2300 2300 """
2301 2301 if not tree or not isinstance(tree, tuple):
2302 2302 return tree
2303 2303 arg = _getaliasarg(tree)
2304 2304 if arg is not None:
2305 2305 return args[arg]
2306 2306 return tuple(_expandargs(t, args) for t in tree)
2307 2307
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used
    to detect cyclic definitions); 'cache' memoizes fully-expanded
    replacement trees by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            # this alias is already on the expansion stack: cycle
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: expand the actual arguments (with a
            # fresh expansion stack) and splice them into the cached tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias reference: recurse into children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2341 2341
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases in ``tree``.

    Aliases are read from the [revsetalias] config section.  When
    ``showwarning`` is provided, broken alias definitions that were never
    referenced are reported through it (once per alias).
    """
    _checkaliasarg(tree)
    aliases = {}
    for declaration, definition in ui.configitems('revsetalias'):
        alias = revsetalias(declaration, definition)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.items()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2356 2356
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    # flatten nested '_concat' nodes left-to-right with an explicit stack
    parts = []
    stack = [tree]
    while stack:
        node = stack.pop()
        if node[0] == '_concat':
            stack.extend(reversed(node[1:]))
        elif node[0] in ('string', 'symbol'):
            parts.append(node[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (node[0])
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2377 2377
def parse(spec, lookup=None):
    """Parse revset specification ``spec``.

    Returns the (tree, pos) pair produced by the generic parser.
    """
    return parser.parser(tokenize, elements).parse(spec, lookup=lookup)
2381 2381
def match(ui, spec, repo=None):
    """Build a matcher for revset ``spec``.

    The returned function takes (repo, subset) and yields the members of
    ``subset`` matching ``spec``.  Raises ParseError on an empty or
    syntactically invalid query.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    tree, pos = parse(spec, lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        # wrap plain sequences in a baseset; smartsets pass through
        if util.safehasattr(subset, 'isascending'):
            return getset(repo, subset, tree)
        return getset(repo, baseset(subset), tree)
    return mfunc
2402 2402
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def _quote(value):
        # escaped, single-quoted form
        return repr(str(value))

    def _formatarg(code, arg):
        if code == 'd':
            return str(int(arg))
        if code == 's':
            return _quote(arg)
        if code == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if code == 'n':
            return _quote(node.hex(arg))
        if code == 'b':
            return _quote(arg.branch())

    def _formatlist(values, code):
        # render a %l<code> list; compact _list/_intlist/_hexlist forms
        # exist for the common types, anything else becomes an 'or' tree
        count = len(values)
        if count == 0:
            return "_list('')"
        if count == 1:
            return _formatarg(code, values[0])
        if code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(v)) for v in values)
        if code == 's':
            return "_list('%s')" % "\0".join(values)
        if code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(v) for v in values)
        if code == 'b':
            return "_list('%s')" % "\0".join(v.branch() for v in values)
        mid = count // 2
        return '(%s or %s)' % (_formatlist(values[:mid], code),
                               _formatlist(values[mid:], code))

    output = ''
    pos = 0
    argindex = 0
    while pos < len(expr):
        ch = expr[pos]
        if ch == '%':
            pos += 1
            code = expr[pos]
            if code == '%':
                output += code
            elif code in 'dsnbr':
                output += _formatarg(code, args[argindex])
                argindex += 1
            elif code == 'l':
                # a list of some type
                pos += 1
                code = expr[pos]
                output += _formatlist(list(args[argindex]), code)
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s'
                                 % code)
        else:
            output += ch
        pos += 1

    return output
2496 2496
2497 2497 def prettyformat(tree):
2498 2498 def _prettyformat(tree, level, lines):
2499 2499 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2500 2500 lines.append((level, str(tree)))
2501 2501 else:
2502 2502 lines.append((level, '(%s' % tree[0]))
2503 2503 for s in tree[1:]:
2504 2504 _prettyformat(s, level + 1, lines)
2505 2505 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2506 2506
2507 2507 lines = []
2508 2508 _prettyformat(tree, 0, lines)
2509 2509 output = '\n'.join((' '*l + s) for l, s in lines)
2510 2510 return output
2511 2511
def depth(tree):
    """Return the nesting depth of parse tree ``tree`` (leaves are 0)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2517 2517
def funcsused(tree):
    """Return the set of function names used anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    return funcs
2528 2528
class abstractsmartset(object):
    """Base class defining the protocol shared by all smartset classes."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        fastiter = self.fastasc
        if fastiter is None:
            # no fast path; fall back to a full iteration
            return min(self)
        for r in fastiter():
            return r
        raise ValueError('arg is an empty sequence')

    def max(self):
        """return the maximum element in the set"""
        fastiter = self.fastdesc
        if fastiter is None:
            # no fast path; fall back to a full iteration
            return max(self)
        for r in fastiter():
            return r
        raise ValueError('arg is an empty sequence')

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        inother = other.__contains__
        return self.filter(lambda r: not inother(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin functions cannot be cached, but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2631 2631
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # _list keeps the input order; _ascending None means "input order"
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        self._ascending = None

    @util.propertycache
    def _set(self):
        # cached set view of the data, for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # cached ascending copy, shared by both sorted iteration orders
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # expose the cached set's bound __contains__ directly
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # unordered: physically reverse the backing list
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]
2722 2722
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes condition results per revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast ascending iteration is available iff the subset has one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # fast descending iteration is available iff the subset has one
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # bug fix: the predicates must be *called*; the previous code
        # tested the bound methods themselves, which are always truthy,
        # and used fastdesc for both directions.
        if self.isascending():
            # ascending iteration: the last element is the largest
            it = self.fastdesc
        elif self.isdescending():
            # descending iteration: the last element is the smallest
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None
2806 2806
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: unknown order; True/False: both inputs asc/desc ordered
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialize the union once, as a baseset
        if not self._genlist:
            self._genlist = baseset(self._iterator())
        return self._genlist

    def _iterator(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            def gen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            gen = gen()
        else:
            iter1 = iter(self._r1)
            iter2 = iter(self._r2)
            gen = self._iterordered(self._ascending, iter1, iter2)
        return gen

    def __iter__(self):
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            return iter(self._iterator())
        self._trysetasclist()
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # consume the gen and try again
            self._list
            return iter(self)
        return it()

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        # merge the two fast ascending iterators, if both exist
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: self._iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        # merge the two fast descending iterators, if both exist
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: self._iterordered(False, iter1(), iter2())

    def _iterordered(self, ascending, iter1, iter2):
        """produce an ordered iteration from two iterators with the same order

        The ascending is used to indicated the iteration direction.
        """
        # bug fix (cleanup): this choice computation was duplicated twice
        # in the original body; compute it once.
        choice = max
        if ascending:
            choice = min

        val1 = None
        val2 = None
        try:
            # Consume both iterators in an ordered way until one is
            # empty
            while True:
                if val1 is None:
                    val1 = next(iter1)
                if val2 is None:
                    val2 = next(iter2)
                smallest = choice(val1, val2)
                yield smallest
                # equal values from both sides are emitted only once
                if val1 == smallest:
                    val1 = None
                if val2 == smallest:
                    val2 = None
        except StopIteration:
            # Flush any remaining values and consume the other one
            it = iter2
            if val1 is not None:
                yield val1
                it = iter1
            elif val2 is not None:
                # might have been equality and both are empty
                yield val2
            for val in it:
                yield val

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # iterate once in the opposite direction, then restore it
        self.reverse()
        val = self.first()
        self.reverse()
        return val
2979 2979
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: None when the generator order is unknown, True when it
        yields ascending values, False when descending.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # a known order enables a fast iterator and an early-exit
            # membership test in that direction
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        consume = self._consumegen()
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    # use the builtin next() so this works on both the
                    # py2 and py3 iterator protocols
                    yield next(consume)
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # fully consumed: switch to list-backed fast iterators
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        if self:
            return next(it())
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # bug fix: recall last(), not first() -- the previous code
            # returned the wrong end of the set after consuming
            return self.last()
        if self:
            return next(it())
        return None
3148 3148
def spanset(repo, start=None, end=None):
    """factory function to dispatch between fullreposet and actual spanset

    Feel free to update all spanset call sites and kill this function at some
    point.
    """
    # any explicit bound means a genuine sub-range; otherwise cover the repo
    if start is not None or end is not None:
        return _spanset(repo, start, end)
    return fullreposet(repo)
3158 3158
3159 3159
class _spanset(abstractsmartset):
    """Smartset covering a contiguous revision range, evaluated lazily.

    Duck-types the baseset class without materializing the whole range.
    Behaves much like xrange(x, y), with two differences:
    - a range given with x > y automatically iterates descending,
    - revisions filtered by the current repoview are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """Build the span [start, end) over repo's revisions.

        start: first revision included (defaults to 0)
        end: first revision excluded, i.e. last + 1 (defaults to len(repo))

        The set iterates descending when end < start.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end, remembering the direction
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        """Set iteration order: ascending unless reverse is requested."""
        self._ascending = not reverse

    def reverse(self):
        """Flip the iteration direction in place."""
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        """Yield revisions from iterrange that are not hidden."""
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        it = self.fastasc if self._ascending else self.fastdesc
        return it()

    def fastasc(self):
        """Ascending iterator over the visible revisions of the span."""
        span = xrange(self._start, self._end)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def fastdesc(self):
        """Descending iterator over the visible revisions of the span."""
        span = xrange(self._end - 1, self._start - 1, -1)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def __contains__(self, rev):
        if not (self._start <= rev < self._end):
            return False
        hidden = self._hiddenrevs
        return not (hidden and rev in hidden)

    def __nonzero__(self):
        # true as soon as a single visible revision exists
        for rev in self:
            return True
        return False

    def __len__(self):
        size = abs(self._end - self._start)
        if not self._hiddenrevs:
            return size
        # subtract the hidden revisions that fall inside the span
        start = self._start
        end = self._end
        count = 0
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return size - count

    def isascending(self):
        """True when the set currently iterates in ascending order."""
        return self._ascending

    def isdescending(self):
        """True when the set currently iterates in descending order."""
        return not self._ascending

    def first(self):
        """Return the first visible revision in order, or None if empty."""
        fast = self.fastasc if self._ascending else self.fastdesc
        for rev in fast():
            return rev
        return None

    def last(self):
        """Return the last visible revision in order, or None if empty."""
        fast = self.fastdesc if self._ascending else self.fastasc
        for rev in fast():
            return rev
        return None
3263 3263
class fullreposet(_spanset):
    """A set containing all revisions in the repo.

    This class exists to host special optimization.
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """Intersect with `other`.

        Since self spans the whole repo, every valid set is a subset of
        it, hence `self & other = other` (reordered to match self's
        direction). This boldly assumes `other` contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset; it was used with "&", so assume
            # a plain set-like object supporting subtraction, and filter
            # out hidden revisions (this boldly assumes all smartsets
            # are pure)
            other = baseset(other - self._hiddenrevs)

        other.sort(reverse=self.isdescending())
        return other
3290 3290
# tell hggettext to extract docstrings from these functions:
# NOTE(review): presumably the i18n build tooling looks up this
# module-level name when collecting translatable strings — confirm
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now