revset: use phasecache.getrevset to calculate public()...
Jun Wu
r35331:0c1aff6d default
@@ -1,674 +1,679
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34                immutable  shared
35 35     public:        X         X
36 36     draft:                   X
37 37     secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set; we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * an old client never changes server state
52 52 * pull never changes server state
53 53 * changesets on a publishing or an old server are seen as public by the client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60                                server
61 61                     old           publish          non-publish
62 62                   N    X       N    D    P       N    D    P
63 63      old client
64 64      pull
65 65       N           -   X/X      -   X/D  X/P      -   X/D  X/P
66 66       X           -   X/X      -   X/D  X/P      -   X/D  X/P
67 67      push
68 68       X          X/X  X/X     X/P  X/P  X/P     X/D  X/D  X/P
69 69      new client
70 70      pull
71 71       N           -   P/X      -   P/D  P/P      -   D/D  P/P
72 72       D           -   P/X      -   P/D  P/P      -   D/D  P/P
73 73       P           -   P/X      -   P/D  P/P      -   P/D  P/P
74 74      push
75 75       D          P/X  P/X     P/P  P/P  P/P     D/D  D/D  P/P
76 76       P          P/X  P/X     P/P  P/P  P/P     P/P  P/P  P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: an old client behaves as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 from __future__ import absolute_import
104 104
105 105 import errno
106 106 import struct
107 107
108 108 from .i18n import _
109 109 from .node import (
110 110 bin,
111 111 hex,
112 112 nullid,
113 113 nullrev,
114 114 short,
115 115 )
116 116 from . import (
117 117 error,
118 118 pycompat,
119 119 smartset,
120 120 txnutil,
121 121 util,
122 122 )
123 123
124 124 _fphasesentry = struct.Struct('>i20s')
125 125
126 126 allphases = public, draft, secret = range(3)
127 127 trackedphases = allphases[1:]
128 128 phasenames = ['public', 'draft', 'secret']
129 129
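
The tuple assignment above makes the phase constants plain integers (public == 0, draft == 1, secret == 2) that also serve as indexes into phasenames and into the per-phase root and revision sets. A tiny illustration of that relationship:

    # Sketch: the constants double as list indexes (values per the code above).
    assert (public, draft, secret) == (0, 1, 2)
    assert phasenames[secret] == 'secret'
    assert list(trackedphases) == [draft, secret]
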
130 130 def _readroots(repo, phasedefaults=None):
131 131 """Read phase roots from disk
132 132
133 133 phasedefaults is a list of fn(repo, roots) callables, which are
134 134 executed if the phase roots file does not exist. When phases are
135 135 being initialized on an existing repository, this could be used to
136 136 set the phase of selected changesets to something other than public.
137 137
138 138 Return (roots, dirty) where dirty is true if roots differ from
139 139 what is being stored.
140 140 """
141 141 repo = repo.unfiltered()
142 142 dirty = False
143 143 roots = [set() for i in allphases]
144 144 try:
145 145 f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
146 146 try:
147 147 for line in f:
148 148 phase, nh = line.split()
149 149 roots[int(phase)].add(bin(nh))
150 150 finally:
151 151 f.close()
152 152 except IOError as inst:
153 153 if inst.errno != errno.ENOENT:
154 154 raise
155 155 if phasedefaults:
156 156 for f in phasedefaults:
157 157 roots = f(repo, roots)
158 158 dirty = True
159 159 return roots, dirty
160 160
161 161 def binaryencode(phasemapping):
162 162 """encode a 'phase -> nodes' mapping into a binary stream
163 163
164 164 Since phases are integers, the mapping is actually a python list:
165 165 [[PUBLIC_HEADS], [DRAFTS_HEADS], [SECRET_HEADS]]
166 166 """
167 167 binarydata = []
168 168 for phase, nodes in enumerate(phasemapping):
169 169 for head in nodes:
170 170 binarydata.append(_fphasesentry.pack(phase, head))
171 171 return ''.join(binarydata)
172 172
173 173 def binarydecode(stream):
174 174 """decode a binary stream into a 'phase -> nodes' mapping
175 175
176 176 Since phases are integers, the mapping is actually a python list."""
177 177 headsbyphase = [[] for i in allphases]
178 178 entrysize = _fphasesentry.size
179 179 while True:
180 180 entry = stream.read(entrysize)
181 181 if len(entry) < entrysize:
182 182 if entry:
183 183 raise error.Abort(_('bad phase-heads stream'))
184 184 break
185 185 phase, node = _fphasesentry.unpack(entry)
186 186 headsbyphase[phase].append(node)
187 187 return headsbyphase
188 188
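
Each record in the stream handled by binaryencode/binarydecode is 24 bytes: a big-endian signed 32-bit phase number followed by a 20-byte binary node, per the '>i20s' struct above. A minimal round-trip sketch, assuming the Python 2-era API shown in this file and made-up node values:

    # Round-trip sketch for the phase-heads wire format (illustrative nodes).
    import io
    from mercurial import phases

    mapping = [[b'\x11' * 20],   # public heads
               [b'\x22' * 20],   # draft heads
               []]               # secret heads
    data = phases.binaryencode(mapping)
    assert len(data) == 2 * 24                        # two '>i20s' records
    assert phases.binarydecode(io.BytesIO(data)) == mapping
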
189 189 def _trackphasechange(data, rev, old, new):
190 190 """add a phase move the <data> dictionnary
191 191
192 192 If data is None, nothing happens.
193 193 """
194 194 if data is None:
195 195 return
196 196 existing = data.get(rev)
197 197 if existing is not None:
198 198 old = existing[0]
199 199 data[rev] = (old, new)
200 200
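
When the same revision is moved more than once within a transaction, the tracking dictionary keeps the original old phase and the latest new phase. A small illustration (the revision number is made up):

    # Sketch: repeated moves of one rev collapse to (first old, last new).
    data = {}
    _trackphasechange(data, 5, secret, draft)
    _trackphasechange(data, 5, draft, public)
    assert data == {5: (secret, public)}
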
201 201 class phasecache(object):
202 202 def __init__(self, repo, phasedefaults, _load=True):
203 203 if _load:
204 204 # Cheap trick to allow shallow-copy without copy module
205 205 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
206 206 self._phasemaxrev = nullrev
207 207 self._phasesets = None
208 208 self.filterunknown(repo)
209 209 self.opener = repo.svfs
210 210
211 def getrevset(self, repo, phases):
211 def getrevset(self, repo, phases, subset=None):
212 212 """return a smartset for the given phases"""
213 213 self.loadphaserevs(repo) # ensure phase's sets are loaded
214 214 phases = set(phases)
215 215 if public not in phases:
216 216 # fast path: _phasesets contains the interesting sets,
217 217 # might only need a union and post-filtering.
218 218 if len(phases) == 1:
219 219 [p] = phases
220 220 revs = self._phasesets[p]
221 221 else:
222 222 revs = set.union(*[self._phasesets[p] for p in phases])
223 223 if repo.changelog.filteredrevs:
224 224 revs = revs - repo.changelog.filteredrevs
225 return smartset.baseset(revs)
225 if subset is None:
226 return smartset.baseset(revs)
227 else:
228 return subset & smartset.baseset(revs)
226 229 else:
227 230 phases = set(allphases).difference(phases)
228 231 if not phases:
229 232 return smartset.fullreposet(repo)
230 233 if len(phases) == 1:
231 234 [p] = phases
232 235 revs = self._phasesets[p]
233 236 else:
234 237 revs = set.union(*[self._phasesets[p] for p in phases])
238 if subset is None:
239 subset = smartset.fullreposet(repo)
235 240 if not revs:
236 return smartset.fullreposet(repo)
237 return smartset.fullreposet(repo).filter(lambda r: r not in revs)
241 return subset
242 return subset.filter(lambda r: r not in revs)
238 243
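
The new optional 'subset' argument lets a caller that already holds a narrower set skip the fullreposet: draft/secret queries intersect the cached phase sets with the subset, and queries that include public filter the subset in place. A usage sketch, assuming 'repo' is a loaded repository object and the revision numbers are illustrative:

    # Sketch: calling getrevset() with and without a subset.
    from mercurial import phases, smartset

    pc = repo._phasecache
    alldrafts = pc.getrevset(repo, (phases.draft,))            # every draft rev
    subset = smartset.baseset([10, 11, 12])                    # illustrative revs
    somedrafts = pc.getrevset(repo, (phases.draft,), subset)   # drafts within subset
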
239 244 def copy(self):
240 245 # Shallow copy meant to ensure isolation in
241 246 # advance/retractboundary(), nothing more.
242 247 ph = self.__class__(None, None, _load=False)
243 248 ph.phaseroots = self.phaseroots[:]
244 249 ph.dirty = self.dirty
245 250 ph.opener = self.opener
246 251 ph._phasemaxrev = self._phasemaxrev
247 252 ph._phasesets = self._phasesets
248 253 return ph
249 254
250 255 def replace(self, phcache):
251 256 """replace all values in 'self' with content of phcache"""
252 257 for a in ('phaseroots', 'dirty', 'opener', '_phasemaxrev',
253 258 '_phasesets'):
254 259 setattr(self, a, getattr(phcache, a))
255 260
256 261 def _getphaserevsnative(self, repo):
257 262 repo = repo.unfiltered()
258 263 nativeroots = []
259 264 for phase in trackedphases:
260 265 nativeroots.append(map(repo.changelog.rev, self.phaseroots[phase]))
261 266 return repo.changelog.computephases(nativeroots)
262 267
263 268 def _computephaserevspure(self, repo):
264 269 repo = repo.unfiltered()
265 270 cl = repo.changelog
266 271 self._phasesets = [set() for phase in allphases]
267 272 roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
268 273 if roots:
269 274 ps = set(cl.descendants(roots))
270 275 for root in roots:
271 276 ps.add(root)
272 277 self._phasesets[secret] = ps
273 278 roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
274 279 if roots:
275 280 ps = set(cl.descendants(roots))
276 281 for root in roots:
277 282 ps.add(root)
278 283 ps.difference_update(self._phasesets[secret])
279 284 self._phasesets[draft] = ps
280 285 self._phasemaxrev = len(cl)
281 286
282 287 def loadphaserevs(self, repo):
283 288 """ensure phase information is loaded in the object"""
284 289 if self._phasesets is None:
285 290 try:
286 291 res = self._getphaserevsnative(repo)
287 292 self._phasemaxrev, self._phasesets = res
288 293 except AttributeError:
289 294 self._computephaserevspure(repo)
290 295
291 296 def invalidate(self):
292 297 self._phasemaxrev = nullrev
293 298 self._phasesets = None
294 299
295 300 def phase(self, repo, rev):
296 301 # We need a repo argument here to be able to build _phasesets
297 302 # if necessary. The repository instance is not stored in
298 303 # phasecache to avoid reference cycles. The changelog instance
299 304 # is not stored because it is a filecache() property and can
300 305 # be replaced without us being notified.
301 306 if rev == nullrev:
302 307 return public
303 308 if rev < nullrev:
304 309 raise ValueError(_('cannot lookup negative revision'))
305 310 if rev >= self._phasemaxrev:
306 311 self.invalidate()
307 312 self.loadphaserevs(repo)
308 313 for phase in trackedphases:
309 314 if rev in self._phasesets[phase]:
310 315 return phase
311 316 return public
312 317
313 318 def write(self):
314 319 if not self.dirty:
315 320 return
316 321 f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
317 322 try:
318 323 self._write(f)
319 324 finally:
320 325 f.close()
321 326
322 327 def _write(self, fp):
323 328 for phase, roots in enumerate(self.phaseroots):
324 329 for h in roots:
325 330 fp.write('%i %s\n' % (phase, hex(h)))
326 331 self.dirty = False
327 332
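
The '%i %s\n' format above means the phaseroots file holds one root per line: the phase index followed by the 40-character hex node. An illustrative (made-up) file content:

    1 5c095ad7e90f871700f02dd1fa5012cb4498a2d4
    2 d0a85a2248b63b6b2fdf5f6ebe298faa0e6fbb1b

Since only draft and secret are tracked phases, phase 0 lines are normally absent.
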
328 333 def _updateroots(self, phase, newroots, tr):
329 334 self.phaseroots[phase] = newroots
330 335 self.invalidate()
331 336 self.dirty = True
332 337
333 338 tr.addfilegenerator('phase', ('phaseroots',), self._write)
334 339 tr.hookargs['phases_moved'] = '1'
335 340
336 341 def registernew(self, repo, tr, targetphase, nodes):
337 342 repo = repo.unfiltered()
338 343 self._retractboundary(repo, tr, targetphase, nodes)
339 344 if tr is not None and 'phases' in tr.changes:
340 345 phasetracking = tr.changes['phases']
341 346 torev = repo.changelog.rev
342 347 phase = self.phase
343 348 for n in nodes:
344 349 rev = torev(n)
345 350 revphase = phase(repo, rev)
346 351 _trackphasechange(phasetracking, rev, None, revphase)
347 352 repo.invalidatevolatilesets()
348 353
349 354 def advanceboundary(self, repo, tr, targetphase, nodes):
350 355 """Set all 'nodes' to phase 'targetphase'
351 356
352 357 Nodes with a phase lower than 'targetphase' are not affected.
353 358 """
354 359 # Be careful to preserve shallow-copied values: do not update
355 360 # phaseroots values, replace them.
356 361 if tr is None:
357 362 phasetracking = None
358 363 else:
359 364 phasetracking = tr.changes.get('phases')
360 365
361 366 repo = repo.unfiltered()
362 367
363 368 delroots = [] # set of root deleted by this path
364 369 for phase in xrange(targetphase + 1, len(allphases)):
365 370 # filter nodes that are not in a compatible phase already
366 371 nodes = [n for n in nodes
367 372 if self.phase(repo, repo[n].rev()) >= phase]
368 373 if not nodes:
369 374 break # no roots to move anymore
370 375
371 376 olds = self.phaseroots[phase]
372 377
373 378 affected = repo.revs('%ln::%ln', olds, nodes)
374 379 for r in affected:
375 380 _trackphasechange(phasetracking, r, self.phase(repo, r),
376 381 targetphase)
377 382
378 383 roots = set(ctx.node() for ctx in repo.set(
379 384 'roots((%ln::) - %ld)', olds, affected))
380 385 if olds != roots:
381 386 self._updateroots(phase, roots, tr)
382 387 # some roots may need to be declared for lower phases
383 388 delroots.extend(olds - roots)
384 389 # declare deleted root in the target phase
385 390 if targetphase != 0:
386 391 self._retractboundary(repo, tr, targetphase, delroots)
387 392 repo.invalidatevolatilesets()
388 393
389 394 def retractboundary(self, repo, tr, targetphase, nodes):
390 395 oldroots = self.phaseroots[:targetphase + 1]
391 396 if tr is None:
392 397 phasetracking = None
393 398 else:
394 399 phasetracking = tr.changes.get('phases')
395 400 repo = repo.unfiltered()
396 401 if (self._retractboundary(repo, tr, targetphase, nodes)
397 402 and phasetracking is not None):
398 403
399 404 # find the affected revisions
400 405 new = self.phaseroots[targetphase]
401 406 old = oldroots[targetphase]
402 407 affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
403 408
404 409 # find the phase of the affected revision
405 410 for phase in xrange(targetphase, -1, -1):
406 411 if phase:
407 412 roots = oldroots[phase]
408 413 revs = set(repo.revs('%ln::%ld', roots, affected))
409 414 affected -= revs
410 415 else: # public phase
411 416 revs = affected
412 417 for r in revs:
413 418 _trackphasechange(phasetracking, r, phase, targetphase)
414 419 repo.invalidatevolatilesets()
415 420
416 421 def _retractboundary(self, repo, tr, targetphase, nodes):
417 422 # Be careful to preserve shallow-copied values: do not update
418 423 # phaseroots values, replace them.
419 424
420 425 repo = repo.unfiltered()
421 426 currentroots = self.phaseroots[targetphase]
422 427 finalroots = oldroots = set(currentroots)
423 428 newroots = [n for n in nodes
424 429 if self.phase(repo, repo[n].rev()) < targetphase]
425 430 if newroots:
426 431
427 432 if nullid in newroots:
428 433 raise error.Abort(_('cannot change null revision phase'))
429 434 currentroots = currentroots.copy()
430 435 currentroots.update(newroots)
431 436
432 437 # Only compute new roots for revs above the roots that are being
433 438 # retracted.
434 439 minnewroot = min(repo[n].rev() for n in newroots)
435 440 aboveroots = [n for n in currentroots
436 441 if repo[n].rev() >= minnewroot]
437 442 updatedroots = repo.set('roots(%ln::)', aboveroots)
438 443
439 444 finalroots = set(n for n in currentroots if repo[n].rev() <
440 445 minnewroot)
441 446 finalroots.update(ctx.node() for ctx in updatedroots)
442 447 if finalroots != oldroots:
443 448 self._updateroots(targetphase, finalroots, tr)
444 449 return True
445 450 return False
446 451
447 452 def filterunknown(self, repo):
448 453 """remove unknown nodes from the phase boundary
449 454
450 455 Nothing is lost as unknown nodes only hold data for their descendants.
451 456 """
452 457 filtered = False
453 458 nodemap = repo.changelog.nodemap # to filter unknown nodes
454 459 for phase, nodes in enumerate(self.phaseroots):
455 460 missing = sorted(node for node in nodes if node not in nodemap)
456 461 if missing:
457 462 for mnode in missing:
458 463 repo.ui.debug(
459 464 'removing unknown node %s from %i-phase boundary\n'
460 465 % (short(mnode), phase))
461 466 nodes.symmetric_difference_update(missing)
462 467 filtered = True
463 468 if filtered:
464 469 self.dirty = True
465 470 # filterunknown is called by repo.destroyed; we may have no changes in
466 471 # roots, but the _phasesets contents are certainly invalid (or at least
467 472 # we have no proper way to check that). Related to issue 3858.
468 473 #
469 474 # The other caller is __init__, which has no _phasesets initialized
470 475 # anyway. If this changes, we should consider adding a dedicated
471 476 # "destroyed" function to phasecache or a proper cache key mechanism
472 477 # (see the branchmap one).
473 478 self.invalidate()
474 479
475 480 def advanceboundary(repo, tr, targetphase, nodes):
476 481 """Add nodes to a phase changing other nodes phases if necessary.
477 482
478 483 This function move boundary *forward* this means that all nodes
479 484 are set in the target phase or kept in a *lower* phase.
480 485
481 486 Simplify boundary to contains phase roots only."""
482 487 phcache = repo._phasecache.copy()
483 488 phcache.advanceboundary(repo, tr, targetphase, nodes)
484 489 repo._phasecache.replace(phcache)
485 490
486 491 def retractboundary(repo, tr, targetphase, nodes):
487 492 """Set nodes back to a phase changing other nodes phases if
488 493 necessary.
489 494
490 495 This function move boundary *backward* this means that all nodes
491 496 are set in the target phase or kept in a *higher* phase.
492 497
493 498 Simplify boundary to contains phase roots only."""
494 499 phcache = repo._phasecache.copy()
495 500 phcache.retractboundary(repo, tr, targetphase, nodes)
496 501 repo._phasecache.replace(phcache)
497 502
498 503 def registernew(repo, tr, targetphase, nodes):
499 504 """register a new revision and its phase
500 505
501 506 Code adding revisions to the repository should use this function to
502 507 set new changesets in their target phase (or higher).
503 508 """
504 509 phcache = repo._phasecache.copy()
505 510 phcache.registernew(repo, tr, targetphase, nodes)
506 511 repo._phasecache.replace(phcache)
507 512
508 513 def listphases(repo):
509 514 """List phases root for serialization over pushkey"""
510 515 # Use ordered dictionary so behavior is deterministic.
511 516 keys = util.sortdict()
512 517 value = '%i' % draft
513 518 cl = repo.unfiltered().changelog
514 519 for root in repo._phasecache.phaseroots[draft]:
515 520 if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
516 521 keys[hex(root)] = value
517 522
518 523 if repo.publishing():
519 524 # Add an extra entry to let the remote know we are a publishing
520 525 # repo. A publishing repo can't just pretend it is an old repo.
521 526 # When pushing to a publishing repo, the client still needs to
522 527 # push phase boundaries.
523 528 #
524 529 # A push does not only push changesets. It also pushes phase data.
525 530 # New phase data may apply to common changesets which won't be
526 531 # pushed (as they are common). Here is a very simple example:
527 532 #
528 533 # 1) repo A pushes changeset X as draft to repo B
529 534 # 2) repo B makes changeset X public
530 535 # 3) repo B pushes to repo A. X is not pushed, but the data that
531 536 #    X is now public should be.
532 537 #
533 538 # The server can't handle it on its own as it has no idea of
534 539 # the client's phase data.
535 540 keys['publishing'] = 'True'
536 541 return keys
537 542
538 543 def pushphase(repo, nhex, oldphasestr, newphasestr):
539 544 """List phases root for serialization over pushkey"""
540 545 repo = repo.unfiltered()
541 546 with repo.lock():
542 547 currentphase = repo[nhex].phase()
543 548 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
544 549 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
545 550 if currentphase == oldphase and newphase < oldphase:
546 551 with repo.transaction('pushkey-phase') as tr:
547 552 advanceboundary(repo, tr, newphase, [bin(nhex)])
548 553 return True
549 554 elif currentphase == newphase:
550 555 # raced, but got correct result
551 556 return True
552 557 else:
553 558 return False
554 559
555 560 def subsetphaseheads(repo, subset):
556 561 """Finds the phase heads for a subset of a history
557 562
558 563 Returns a list indexed by phase number where each item is a list of phase
559 564 head nodes.
560 565 """
561 566 cl = repo.changelog
562 567
563 568 headsbyphase = [[] for i in allphases]
564 569 # No need to keep track of secret phase; any heads in the subset that
565 570 # are not mentioned are implicitly secret.
566 571 for phase in allphases[:-1]:
567 572 revset = "heads(%%ln & %s())" % phasenames[phase]
568 573 headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
569 574 return headsbyphase
570 575
571 576 def updatephases(repo, trgetter, headsbyphase):
572 577 """Updates the repo with the given phase heads"""
573 578 # Now advance phase boundaries of all but secret phase
574 579 #
575 580 # run the update (and fetch transaction) only if there are actually things
577 582 # to update. This avoids creating an empty transaction during no-op operations.
577 582
578 583 for phase in allphases[:-1]:
579 584 revset = '%%ln - %s()' % phasenames[phase]
580 585 heads = [c.node() for c in repo.set(revset, headsbyphase[phase])]
581 586 if heads:
582 587 advanceboundary(repo, trgetter(), phase, heads)
583 588
584 589 def analyzeremotephases(repo, subset, roots):
585 590 """Compute phases heads and root in a subset of node from root dict
586 591
587 592 * subset is heads of the subset
588 593 * roots is {<nodeid> => phase} mapping. key and value are string.
589 594
590 595 Accept unknown element input
591 596 """
592 597 repo = repo.unfiltered()
593 598 # build list from dictionary
594 599 draftroots = []
595 600 nodemap = repo.changelog.nodemap # to filter unknown nodes
596 601 for nhex, phase in roots.iteritems():
597 602 if nhex == 'publishing': # ignore data related to publish option
598 603 continue
599 604 node = bin(nhex)
600 605 phase = int(phase)
601 606 if phase == public:
602 607 if node != nullid:
603 608 repo.ui.warn(_('ignoring inconsistent public root'
604 609 ' from remote: %s\n') % nhex)
605 610 elif phase == draft:
606 611 if node in nodemap:
607 612 draftroots.append(node)
608 613 else:
609 614 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
610 615 % (phase, nhex))
611 616 # compute heads
612 617 publicheads = newheads(repo, subset, draftroots)
613 618 return publicheads, draftroots
614 619
615 620 class remotephasessummary(object):
616 621 """summarize phase information on the remote side
617 622
618 623 :publishing: True if the remote is publishing
619 624 :publicheads: list of remote public phase heads (nodes)
620 625 :draftheads: list of remote draft phase heads (nodes)
621 626 :draftroots: list of remote draft phase roots (nodes)
622 627 """
623 628
624 629 def __init__(self, repo, remotesubset, remoteroots):
625 630 unfi = repo.unfiltered()
626 631 self._allremoteroots = remoteroots
627 632
628 633 self.publishing = remoteroots.get('publishing', False)
629 634
630 635 ana = analyzeremotephases(repo, remotesubset, remoteroots)
631 636 self.publicheads, self.draftroots = ana
632 637 # Get the list of all "heads" revs draft on remote
633 638 dheads = unfi.set('heads(%ln::%ln)', self.draftroots, remotesubset)
634 639 self.draftheads = [c.node() for c in dheads]
635 640
636 641 def newheads(repo, heads, roots):
637 642 """compute new head of a subset minus another
638 643
639 644 * `heads`: define the first subset
640 645 * `roots`: define the second we subtract from the first"""
641 646 repo = repo.unfiltered()
642 647 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
643 648 heads, roots, roots, heads)
644 649 return [c.node() for c in revset]
645 650
646 651
647 652 def newcommitphase(ui):
648 653 """helper to get the target phase of new commit
649 654
650 655 Handle all possible values for the phases.new-commit options.
651 656
652 657 """
653 658 v = ui.config('phases', 'new-commit')
654 659 try:
655 660 return phasenames.index(v)
656 661 except ValueError:
657 662 try:
658 663 return int(v)
659 664 except ValueError:
660 665 msg = _("phases.new-commit: not a valid phase name ('%s')")
661 666 raise error.ConfigError(msg % v)
662 667
663 668 def hassecret(repo):
664 669 """utility function that check if a repo have any secret changeset."""
665 670 return bool(repo._phasecache.phaseroots[2])
666 671
667 672 def preparehookargs(node, old, new):
668 673 if old is None:
669 674 old = ''
670 675 else:
671 676 old = phasenames[old]
672 677 return {'node': node,
673 678 'oldphase': old,
674 679 'phase': phasenames[new]}
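
The revset.py hunk that follows is where the commit message's change lands: a phase predicate can now hand its incoming subset to phasecache.getrevset() instead of assembling the answer from a fullreposet. The committed predicate bodies are outside this excerpt, so the sketch below only illustrates that shape (the '_phase' helper and 'publicsketch' names are assumptions, not the actual code):

    # Illustrative shape of a phase predicate delegating to the phase cache.
    from mercurial import phases

    def _phase(repo, subset, *targets):
        """revisions in 'subset' whose phase is one of 'targets' (sketch)"""
        return repo._phasecache.getrevset(repo, targets, subset)

    def publicsketch(repo, subset, x):
        # public() takes no arguments; 'x' is the (empty) parsed argument tree
        return _phase(repo, subset, phases.public)
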
@@ -1,2220 +1,2215
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 obsutil,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 revsetlang,
28 28 scmutil,
29 29 smartset,
30 30 util,
31 31 )
32 32
33 33 # helpers for processing parsed tree
34 34 getsymbol = revsetlang.getsymbol
35 35 getstring = revsetlang.getstring
36 36 getinteger = revsetlang.getinteger
37 37 getboolean = revsetlang.getboolean
38 38 getlist = revsetlang.getlist
39 39 getrange = revsetlang.getrange
40 40 getargs = revsetlang.getargs
41 41 getargsdict = revsetlang.getargsdict
42 42
43 43 baseset = smartset.baseset
44 44 generatorset = smartset.generatorset
45 45 spanset = smartset.spanset
46 46 fullreposet = smartset.fullreposet
47 47
48 48 # Constants for ordering requirement, used in getset():
49 49 #
50 50 # If 'define', any nested functions and operations MAY change the ordering of
51 51 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
52 52 # it). If 'follow', any nested functions and operations MUST take the ordering
53 53 # specified by the first operand to the '&' operator.
54 54 #
55 55 # For instance,
56 56 #
57 57 # X & (Y | Z)
58 58 # ^ ^^^^^^^
59 59 # | follow
60 60 # define
61 61 #
62 62 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
63 63 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
64 64 #
65 65 # 'any' means the order doesn't matter. For instance,
66 66 #
67 67 # (X & !Y) | ancestors(Z)
68 68 # ^ ^
69 69 # any any
70 70 #
71 71 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
72 72 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
73 73 # since 'ancestors' does not care about the order of its argument.
74 74 #
75 75 # Currently, most revsets do not care about the order, so 'define' is
76 76 # equivalent to 'follow' for them, and the resulting order is based on the
77 77 # 'subset' parameter passed down to them:
78 78 #
79 79 # m = revset.match(...)
80 80 # m(repo, subset, order=defineorder)
81 81 # ^^^^^^
82 82 # For most revsets, 'define' means using the order this subset provides
83 83 #
84 84 # There are a few revsets that always redefine the order if 'define' is
85 85 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
86 86 anyorder = 'any' # don't care about the order; could even be random-shuffled
87 87 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
88 88 followorder = 'follow' # MUST follow the current order
89 89
90 90 # helpers
91 91
92 92 def getset(repo, subset, x, order=defineorder):
93 93 if not x:
94 94 raise error.ParseError(_("missing argument"))
95 95 return methods[x[0]](repo, subset, *x[1:], order=order)
96 96
97 97 def _getrevsource(repo, r):
98 98 extra = repo[r].extra()
99 99 for label in ('source', 'transplant_source', 'rebase_source'):
100 100 if label in extra:
101 101 try:
102 102 return repo[extra[label]].rev()
103 103 except error.RepoLookupError:
104 104 pass
105 105 return None
106 106
107 107 # operator methods
108 108
109 109 def stringset(repo, subset, x, order):
110 110 x = scmutil.intrev(repo[x])
111 111 if (x in subset
112 112 or x == node.nullrev and isinstance(subset, fullreposet)):
113 113 return baseset([x])
114 114 return baseset()
115 115
116 116 def rangeset(repo, subset, x, y, order):
117 117 m = getset(repo, fullreposet(repo), x)
118 118 n = getset(repo, fullreposet(repo), y)
119 119
120 120 if not m or not n:
121 121 return baseset()
122 122 return _makerangeset(repo, subset, m.first(), n.last(), order)
123 123
124 124 def rangeall(repo, subset, x, order):
125 125 assert x is None
126 126 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
127 127
128 128 def rangepre(repo, subset, y, order):
129 129 # ':y' can't be rewritten to '0:y' since '0' may be hidden
130 130 n = getset(repo, fullreposet(repo), y)
131 131 if not n:
132 132 return baseset()
133 133 return _makerangeset(repo, subset, 0, n.last(), order)
134 134
135 135 def rangepost(repo, subset, x, order):
136 136 m = getset(repo, fullreposet(repo), x)
137 137 if not m:
138 138 return baseset()
139 139 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
140 140
141 141 def _makerangeset(repo, subset, m, n, order):
142 142 if m == n:
143 143 r = baseset([m])
144 144 elif n == node.wdirrev:
145 145 r = spanset(repo, m, len(repo)) + baseset([n])
146 146 elif m == node.wdirrev:
147 147 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
148 148 elif m < n:
149 149 r = spanset(repo, m, n + 1)
150 150 else:
151 151 r = spanset(repo, m, n - 1)
152 152
153 153 if order == defineorder:
154 154 return r & subset
155 155 else:
156 156 # carrying the sorting over when possible would be more efficient
157 157 return subset & r
158 158
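
For reference, 'm:n' ranges are inclusive at both ends and run backwards when m > n, which is exactly what the spanset bounds above encode. A small usage sketch, assuming a repository with at least six revisions:

    # Sketch: inclusive ranges, ascending and descending.
    list(repo.revs('2:5'))   # -> [2, 3, 4, 5]
    list(repo.revs('5:2'))   # -> [5, 4, 3, 2]
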
159 159 def dagrange(repo, subset, x, y, order):
160 160 r = fullreposet(repo)
161 161 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
162 162 includepath=True)
163 163 return subset & xs
164 164
165 165 def andset(repo, subset, x, y, order):
166 166 if order == anyorder:
167 167 yorder = anyorder
168 168 else:
169 169 yorder = followorder
170 170 return getset(repo, getset(repo, subset, x, order), y, yorder)
171 171
172 172 def andsmallyset(repo, subset, x, y, order):
173 173 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
174 174 if order == anyorder:
175 175 yorder = anyorder
176 176 else:
177 177 yorder = followorder
178 178 return getset(repo, getset(repo, subset, y, yorder), x, order)
179 179
180 180 def differenceset(repo, subset, x, y, order):
181 181 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
182 182
183 183 def _orsetlist(repo, subset, xs, order):
184 184 assert xs
185 185 if len(xs) == 1:
186 186 return getset(repo, subset, xs[0], order)
187 187 p = len(xs) // 2
188 188 a = _orsetlist(repo, subset, xs[:p], order)
189 189 b = _orsetlist(repo, subset, xs[p:], order)
190 190 return a + b
191 191
192 192 def orset(repo, subset, x, order):
193 193 xs = getlist(x)
194 194 if order == followorder:
195 195 # slow path to take the subset order
196 196 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
197 197 else:
198 198 return _orsetlist(repo, subset, xs, order)
199 199
200 200 def notset(repo, subset, x, order):
201 201 return subset - getset(repo, subset, x, anyorder)
202 202
203 203 def relationset(repo, subset, x, y, order):
204 204 raise error.ParseError(_("can't use a relation in this context"))
205 205
206 206 def relsubscriptset(repo, subset, x, y, z, order):
207 207 # this is a pretty basic implementation of the 'x#y[z]' operator, still
208 208 # experimental and so undocumented. See the wiki for further ideas.
209 209 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
210 210 rel = getsymbol(y)
211 211 n = getinteger(z, _("relation subscript must be an integer"))
212 212
213 213 # TODO: perhaps this should be a table of relation functions
214 214 if rel in ('g', 'generations'):
215 215 # TODO: support range, rewrite tests, and drop startdepth argument
216 216 # from ancestors() and descendants() predicates
217 217 if n <= 0:
218 218 n = -n
219 219 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
220 220 else:
221 221 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
222 222
223 223 raise error.UnknownIdentifier(rel, ['generations'])
224 224
225 225 def subscriptset(repo, subset, x, y, order):
226 226 raise error.ParseError(_("can't use a subscript in this context"))
227 227
228 228 def listset(repo, subset, *xs, **opts):
229 229 raise error.ParseError(_("can't use a list in this context"),
230 230 hint=_('see hg help "revsets.x or y"'))
231 231
232 232 def keyvaluepair(repo, subset, k, v, order):
233 233 raise error.ParseError(_("can't use a key-value pair in this context"))
234 234
235 235 def func(repo, subset, a, b, order):
236 236 f = getsymbol(a)
237 237 if f in symbols:
238 238 func = symbols[f]
239 239 if getattr(func, '_takeorder', False):
240 240 return func(repo, subset, b, order)
241 241 return func(repo, subset, b)
242 242
243 243 keep = lambda fn: getattr(fn, '__doc__', None) is not None
244 244
245 245 syms = [s for (s, fn) in symbols.items() if keep(fn)]
246 246 raise error.UnknownIdentifier(f, syms)
247 247
248 248 # functions
249 249
250 250 # symbols are callables like:
251 251 # fn(repo, subset, x)
252 252 # with:
253 253 # repo - current repository instance
254 254 # subset - of revisions to be examined
255 255 # x - argument in tree form
256 256 symbols = revsetlang.symbols
257 257
258 258 # symbols which can't be used for a DoS attack for any given input
259 259 # (e.g. those which accept regexes as plain strings shouldn't be included)
260 260 # functions that just return a lot of changesets (like all) don't count here
261 261 safesymbols = set()
262 262
263 263 predicate = registrar.revsetpredicate()
264 264
265 265 @predicate('_destupdate')
266 266 def _destupdate(repo, subset, x):
267 267 # experimental revset for update destination
268 268 args = getargsdict(x, 'limit', 'clean')
269 269 return subset & baseset([destutil.destupdate(repo, **args)[0]])
270 270
271 271 @predicate('_destmerge')
272 272 def _destmerge(repo, subset, x):
273 273 # experimental revset for merge destination
274 274 sourceset = None
275 275 if x is not None:
276 276 sourceset = getset(repo, fullreposet(repo), x)
277 277 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
278 278
279 279 @predicate('adds(pattern)', safe=True, weight=30)
280 280 def adds(repo, subset, x):
281 281 """Changesets that add a file matching pattern.
282 282
283 283 The pattern without explicit kind like ``glob:`` is expected to be
284 284 relative to the current directory and match against a file or a
285 285 directory.
286 286 """
287 287 # i18n: "adds" is a keyword
288 288 pat = getstring(x, _("adds requires a pattern"))
289 289 return checkstatus(repo, subset, pat, 1)
290 290
291 291 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
292 292 def ancestor(repo, subset, x):
293 293 """A greatest common ancestor of the changesets.
294 294
295 295 Accepts 0 or more changesets.
296 296 Will return an empty list when passed no args.
297 297 Greatest common ancestor of a single changeset is that changeset.
298 298 """
299 299 # i18n: "ancestor" is a keyword
300 300 l = getlist(x)
301 301 rl = fullreposet(repo)
302 302 anc = None
303 303
304 304 # (getset(repo, rl, i) for i in l) generates a list of lists
305 305 for revs in (getset(repo, rl, i) for i in l):
306 306 for r in revs:
307 307 if anc is None:
308 308 anc = repo[r]
309 309 else:
310 310 anc = anc.ancestor(repo[r])
311 311
312 312 if anc is not None and anc.rev() in subset:
313 313 return baseset([anc.rev()])
314 314 return baseset()
315 315
316 316 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
317 317 stopdepth=None):
318 318 heads = getset(repo, fullreposet(repo), x)
319 319 if not heads:
320 320 return baseset()
321 321 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
322 322 return subset & s
323 323
324 324 @predicate('ancestors(set[, depth])', safe=True)
325 325 def ancestors(repo, subset, x):
326 326 """Changesets that are ancestors of changesets in set, including the
327 327 given changesets themselves.
328 328
329 329 If depth is specified, the result only includes changesets up to
330 330 the specified generation.
331 331 """
332 332 # startdepth is for internal use only until we can decide the UI
333 333 args = getargsdict(x, 'ancestors', 'set depth startdepth')
334 334 if 'set' not in args:
335 335 # i18n: "ancestors" is a keyword
336 336 raise error.ParseError(_('ancestors takes at least 1 argument'))
337 337 startdepth = stopdepth = None
338 338 if 'startdepth' in args:
339 339 n = getinteger(args['startdepth'],
340 340 "ancestors expects an integer startdepth")
341 341 if n < 0:
342 342 raise error.ParseError("negative startdepth")
343 343 startdepth = n
344 344 if 'depth' in args:
345 345 # i18n: "ancestors" is a keyword
346 346 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
347 347 if n < 0:
348 348 raise error.ParseError(_("negative depth"))
349 349 stopdepth = n + 1
350 350 return _ancestors(repo, subset, args['set'],
351 351 startdepth=startdepth, stopdepth=stopdepth)
352 352
353 353 @predicate('_firstancestors', safe=True)
354 354 def _firstancestors(repo, subset, x):
355 355 # ``_firstancestors(set)``
356 356 # Like ``ancestors(set)`` but follows only the first parents.
357 357 return _ancestors(repo, subset, x, followfirst=True)
358 358
359 359 def _childrenspec(repo, subset, x, n, order):
360 360 """Changesets that are the Nth child of a changeset
361 361 in set.
362 362 """
363 363 cs = set()
364 364 for r in getset(repo, fullreposet(repo), x):
365 365 for i in range(n):
366 366 c = repo[r].children()
367 367 if len(c) == 0:
368 368 break
369 369 if len(c) > 1:
370 370 raise error.RepoLookupError(
371 371 _("revision in set has more than one child"))
372 372 r = c[0].rev()
373 373 else:
374 374 cs.add(r)
375 375 return subset & cs
376 376
377 377 def ancestorspec(repo, subset, x, n, order):
378 378 """``set~n``
379 379 Changesets that are the Nth ancestor (first parents only) of a changeset
380 380 in set.
381 381 """
382 382 n = getinteger(n, _("~ expects a number"))
383 383 if n < 0:
384 384 # children lookup
385 385 return _childrenspec(repo, subset, x, -n, order)
386 386 ps = set()
387 387 cl = repo.changelog
388 388 for r in getset(repo, fullreposet(repo), x):
389 389 for i in range(n):
390 390 try:
391 391 r = cl.parentrevs(r)[0]
392 392 except error.WdirUnsupported:
393 393 r = repo[r].parents()[0].rev()
394 394 ps.add(r)
395 395 return subset & ps
396 396
397 397 @predicate('author(string)', safe=True, weight=10)
398 398 def author(repo, subset, x):
399 399 """Alias for ``user(string)``.
400 400 """
401 401 # i18n: "author" is a keyword
402 402 n = getstring(x, _("author requires a string"))
403 403 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
404 404 return subset.filter(lambda x: matcher(repo[x].user()),
405 405 condrepr=('<user %r>', n))
406 406
407 407 @predicate('bisect(string)', safe=True)
408 408 def bisect(repo, subset, x):
409 409 """Changesets marked in the specified bisect status:
410 410
411 411 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
412 412 - ``goods``, ``bads`` : csets topologically good/bad
413 413 - ``range`` : csets taking part in the bisection
414 414 - ``pruned`` : csets that are goods, bads or skipped
415 415 - ``untested`` : csets whose fate is yet unknown
416 416 - ``ignored`` : csets ignored due to DAG topology
417 417 - ``current`` : the cset currently being bisected
418 418 """
419 419 # i18n: "bisect" is a keyword
420 420 status = getstring(x, _("bisect requires a string")).lower()
421 421 state = set(hbisect.get(repo, status))
422 422 return subset & state
423 423
424 424 # Backward-compatibility
425 425 # - no help entry so that we do not advertise it any more
426 426 @predicate('bisected', safe=True)
427 427 def bisected(repo, subset, x):
428 428 return bisect(repo, subset, x)
429 429
430 430 @predicate('bookmark([name])', safe=True)
431 431 def bookmark(repo, subset, x):
432 432 """The named bookmark or all bookmarks.
433 433
434 434 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
435 435 """
436 436 # i18n: "bookmark" is a keyword
437 437 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
438 438 if args:
439 439 bm = getstring(args[0],
440 440 # i18n: "bookmark" is a keyword
441 441 _('the argument to bookmark must be a string'))
442 442 kind, pattern, matcher = util.stringmatcher(bm)
443 443 bms = set()
444 444 if kind == 'literal':
445 445 bmrev = repo._bookmarks.get(pattern, None)
446 446 if not bmrev:
447 447 raise error.RepoLookupError(_("bookmark '%s' does not exist")
448 448 % pattern)
449 449 bms.add(repo[bmrev].rev())
450 450 else:
451 451 matchrevs = set()
452 452 for name, bmrev in repo._bookmarks.iteritems():
453 453 if matcher(name):
454 454 matchrevs.add(bmrev)
455 455 if not matchrevs:
456 456 raise error.RepoLookupError(_("no bookmarks exist"
457 457 " that match '%s'") % pattern)
458 458 for bmrev in matchrevs:
459 459 bms.add(repo[bmrev].rev())
460 460 else:
461 461 bms = {repo[r].rev() for r in repo._bookmarks.values()}
462 462 bms -= {node.nullrev}
463 463 return subset & bms
464 464
465 465 @predicate('branch(string or set)', safe=True, weight=10)
466 466 def branch(repo, subset, x):
467 467 """
468 468 All changesets belonging to the given branch or the branches of the given
469 469 changesets.
470 470
471 471 Pattern matching is supported for `string`. See
472 472 :hg:`help revisions.patterns`.
473 473 """
474 474 getbi = repo.revbranchcache().branchinfo
475 475 def getbranch(r):
476 476 try:
477 477 return getbi(r)[0]
478 478 except error.WdirUnsupported:
479 479 return repo[r].branch()
480 480
481 481 try:
482 482 b = getstring(x, '')
483 483 except error.ParseError:
484 484 # not a string, but another revspec, e.g. tip()
485 485 pass
486 486 else:
487 487 kind, pattern, matcher = util.stringmatcher(b)
488 488 if kind == 'literal':
489 489 # note: falls through to the revspec case if no branch with
490 490 # this name exists and pattern kind is not specified explicitly
491 491 if pattern in repo.branchmap():
492 492 return subset.filter(lambda r: matcher(getbranch(r)),
493 493 condrepr=('<branch %r>', b))
494 494 if b.startswith('literal:'):
495 495 raise error.RepoLookupError(_("branch '%s' does not exist")
496 496 % pattern)
497 497 else:
498 498 return subset.filter(lambda r: matcher(getbranch(r)),
499 499 condrepr=('<branch %r>', b))
500 500
501 501 s = getset(repo, fullreposet(repo), x)
502 502 b = set()
503 503 for r in s:
504 504 b.add(getbranch(r))
505 505 c = s.__contains__
506 506 return subset.filter(lambda r: c(r) or getbranch(r) in b,
507 507 condrepr=lambda: '<branch %r>' % sorted(b))
508 508
509 509 @predicate('bumped()', safe=True)
510 510 def bumped(repo, subset, x):
511 511 msg = ("'bumped()' is deprecated, "
512 512 "use 'phasedivergent()'")
513 513 repo.ui.deprecwarn(msg, '4.4')
514 514
515 515 return phasedivergent(repo, subset, x)
516 516
517 517 @predicate('phasedivergent()', safe=True)
518 518 def phasedivergent(repo, subset, x):
519 519 """Mutable changesets marked as successors of public changesets.
520 520
521 521 Only non-public and non-obsolete changesets can be `phasedivergent`.
522 522 (EXPERIMENTAL)
523 523 """
524 524 # i18n: "phasedivergent" is a keyword
525 525 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
526 526 phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
527 527 return subset & phasedivergent
528 528
529 529 @predicate('bundle()', safe=True)
530 530 def bundle(repo, subset, x):
531 531 """Changesets in the bundle.
532 532
533 533 Bundle must be specified by the -R option."""
534 534
535 535 try:
536 536 bundlerevs = repo.changelog.bundlerevs
537 537 except AttributeError:
538 538 raise error.Abort(_("no bundle provided - specify with -R"))
539 539 return subset & bundlerevs
540 540
541 541 def checkstatus(repo, subset, pat, field):
542 542 hasset = matchmod.patkind(pat) == 'set'
543 543
544 544 mcache = [None]
545 545 def matches(x):
546 546 c = repo[x]
547 547 if not mcache[0] or hasset:
548 548 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
549 549 m = mcache[0]
550 550 fname = None
551 551 if not m.anypats() and len(m.files()) == 1:
552 552 fname = m.files()[0]
553 553 if fname is not None:
554 554 if fname not in c.files():
555 555 return False
556 556 else:
557 557 for f in c.files():
558 558 if m(f):
559 559 break
560 560 else:
561 561 return False
562 562 files = repo.status(c.p1().node(), c.node())[field]
563 563 if fname is not None:
564 564 if fname in files:
565 565 return True
566 566 else:
567 567 for f in files:
568 568 if m(f):
569 569 return True
570 570
571 571 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
572 572
573 573 def _children(repo, subset, parentset):
574 574 if not parentset:
575 575 return baseset()
576 576 cs = set()
577 577 pr = repo.changelog.parentrevs
578 578 minrev = parentset.min()
579 579 nullrev = node.nullrev
580 580 for r in subset:
581 581 if r <= minrev:
582 582 continue
583 583 p1, p2 = pr(r)
584 584 if p1 in parentset:
585 585 cs.add(r)
586 586 if p2 != nullrev and p2 in parentset:
587 587 cs.add(r)
588 588 return baseset(cs)
589 589
590 590 @predicate('children(set)', safe=True)
591 591 def children(repo, subset, x):
592 592 """Child changesets of changesets in set.
593 593 """
594 594 s = getset(repo, fullreposet(repo), x)
595 595 cs = _children(repo, subset, s)
596 596 return subset & cs
597 597
598 598 @predicate('closed()', safe=True, weight=10)
599 599 def closed(repo, subset, x):
600 600 """Changeset is closed.
601 601 """
602 602 # i18n: "closed" is a keyword
603 603 getargs(x, 0, 0, _("closed takes no arguments"))
604 604 return subset.filter(lambda r: repo[r].closesbranch(),
605 605 condrepr='<branch closed>')
606 606
607 607 @predicate('contains(pattern)', weight=100)
608 608 def contains(repo, subset, x):
609 609 """The revision's manifest contains a file matching pattern (but might not
610 610 modify it). See :hg:`help patterns` for information about file patterns.
611 611
612 612 The pattern without explicit kind like ``glob:`` is expected to be
613 613 relative to the current directory and match against a file exactly
614 614 for efficiency.
615 615 """
616 616 # i18n: "contains" is a keyword
617 617 pat = getstring(x, _("contains requires a pattern"))
618 618
619 619 def matches(x):
620 620 if not matchmod.patkind(pat):
621 621 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
622 622 if pats in repo[x]:
623 623 return True
624 624 else:
625 625 c = repo[x]
626 626 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
627 627 for f in c.manifest():
628 628 if m(f):
629 629 return True
630 630 return False
631 631
632 632 return subset.filter(matches, condrepr=('<contains %r>', pat))
633 633
634 634 @predicate('converted([id])', safe=True)
635 635 def converted(repo, subset, x):
636 636 """Changesets converted from the given identifier in the old repository if
637 637 present, or all converted changesets if no identifier is specified.
638 638 """
639 639
640 640 # There is exactly no chance of resolving the revision, so do a simple
641 641 # string compare and hope for the best
642 642
643 643 rev = None
644 644 # i18n: "converted" is a keyword
645 645 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
646 646 if l:
647 647 # i18n: "converted" is a keyword
648 648 rev = getstring(l[0], _('converted requires a revision'))
649 649
650 650 def _matchvalue(r):
651 651 source = repo[r].extra().get('convert_revision', None)
652 652 return source is not None and (rev is None or source.startswith(rev))
653 653
654 654 return subset.filter(lambda r: _matchvalue(r),
655 655 condrepr=('<converted %r>', rev))
656 656
657 657 @predicate('date(interval)', safe=True, weight=10)
658 658 def date(repo, subset, x):
659 659 """Changesets within the interval, see :hg:`help dates`.
660 660 """
661 661 # i18n: "date" is a keyword
662 662 ds = getstring(x, _("date requires a string"))
663 663 dm = util.matchdate(ds)
664 664 return subset.filter(lambda x: dm(repo[x].date()[0]),
665 665 condrepr=('<date %r>', ds))
666 666
667 667 @predicate('desc(string)', safe=True, weight=10)
668 668 def desc(repo, subset, x):
669 669 """Search commit message for string. The match is case-insensitive.
670 670
671 671 Pattern matching is supported for `string`. See
672 672 :hg:`help revisions.patterns`.
673 673 """
674 674 # i18n: "desc" is a keyword
675 675 ds = getstring(x, _("desc requires a string"))
676 676
677 677 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
678 678
679 679 return subset.filter(lambda r: matcher(repo[r].description()),
680 680 condrepr=('<desc %r>', ds))
681 681
682 682 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
683 683 stopdepth=None):
684 684 roots = getset(repo, fullreposet(repo), x)
685 685 if not roots:
686 686 return baseset()
687 687 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
688 688 return subset & s
689 689
690 690 @predicate('descendants(set[, depth])', safe=True)
691 691 def descendants(repo, subset, x):
692 692 """Changesets which are descendants of changesets in set, including the
693 693 given changesets themselves.
694 694
695 695 If depth is specified, the result only includes changesets up to
696 696 the specified generation.
697 697 """
698 698 # startdepth is for internal use only until we can decide the UI
699 699 args = getargsdict(x, 'descendants', 'set depth startdepth')
700 700 if 'set' not in args:
701 701 # i18n: "descendants" is a keyword
702 702 raise error.ParseError(_('descendants takes at least 1 argument'))
703 703 startdepth = stopdepth = None
704 704 if 'startdepth' in args:
705 705 n = getinteger(args['startdepth'],
706 706 "descendants expects an integer startdepth")
707 707 if n < 0:
708 708 raise error.ParseError("negative startdepth")
709 709 startdepth = n
710 710 if 'depth' in args:
711 711 # i18n: "descendants" is a keyword
712 712 n = getinteger(args['depth'], _("descendants expects an integer depth"))
713 713 if n < 0:
714 714 raise error.ParseError(_("negative depth"))
715 715 stopdepth = n + 1
716 716 return _descendants(repo, subset, args['set'],
717 717 startdepth=startdepth, stopdepth=stopdepth)
718 718
719 719 @predicate('_firstdescendants', safe=True)
720 720 def _firstdescendants(repo, subset, x):
721 721 # ``_firstdescendants(set)``
722 722 # Like ``descendants(set)`` but follows only the first parents.
723 723 return _descendants(repo, subset, x, followfirst=True)
724 724
725 725 @predicate('destination([set])', safe=True, weight=10)
726 726 def destination(repo, subset, x):
727 727 """Changesets that were created by a graft, transplant or rebase operation,
728 728 with the given revisions specified as the source. Omitting the optional set
729 729 is the same as passing all().
730 730 """
731 731 if x is not None:
732 732 sources = getset(repo, fullreposet(repo), x)
733 733 else:
734 734 sources = fullreposet(repo)
735 735
736 736 dests = set()
737 737
738 738 # subset contains all of the possible destinations that can be returned, so
739 739 # iterate over them and see if their source(s) were provided in the arg set.
740 740 # Even if the immediate src of r is not in the arg set, src's source (or
741 741 # further back) may be. Scanning back further than the immediate src allows
742 742 # transitive transplants and rebases to yield the same results as transitive
743 743 # grafts.
744 744 for r in subset:
745 745 src = _getrevsource(repo, r)
746 746 lineage = None
747 747
748 748 while src is not None:
749 749 if lineage is None:
750 750 lineage = list()
751 751
752 752 lineage.append(r)
753 753
754 754 # The visited lineage is a match if the current source is in the arg
755 755 # set. Since every candidate dest is visited by way of iterating
756 756 # subset, any dests further back in the lineage will be tested by a
757 757 # different iteration over subset. Likewise, if the src was already
758 758 # selected, the current lineage can be selected without going back
759 759 # further.
760 760 if src in sources or src in dests:
761 761 dests.update(lineage)
762 762 break
763 763
764 764 r = src
765 765 src = _getrevsource(repo, r)
766 766
767 767 return subset.filter(dests.__contains__,
768 768 condrepr=lambda: '<destination %r>' % sorted(dests))
769 769
770 770 @predicate('divergent()', safe=True)
771 771 def divergent(repo, subset, x):
772 772 msg = ("'divergent()' is deprecated, "
773 773 "use 'contentdivergent()'")
774 774 repo.ui.deprecwarn(msg, '4.4')
775 775
776 776 return contentdivergent(repo, subset, x)
777 777
778 778 @predicate('contentdivergent()', safe=True)
779 779 def contentdivergent(repo, subset, x):
780 780 """
781 781 Final successors of changesets with an alternative set of final
782 782 successors. (EXPERIMENTAL)
783 783 """
784 784 # i18n: "contentdivergent" is a keyword
785 785 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
786 786 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
787 787 return subset & contentdivergent
788 788
789 789 @predicate('extdata(source)', safe=False, weight=100)
790 790 def extdata(repo, subset, x):
791 791 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
792 792 # i18n: "extdata" is a keyword
793 793 args = getargsdict(x, 'extdata', 'source')
794 794 source = getstring(args.get('source'),
795 795 # i18n: "extdata" is a keyword
796 796 _('extdata takes at least 1 string argument'))
797 797 data = scmutil.extdatasource(repo, source)
798 798 return subset & baseset(data)
799 799
800 800 @predicate('extinct()', safe=True)
801 801 def extinct(repo, subset, x):
802 802 """Obsolete changesets with obsolete descendants only.
803 803 """
804 804 # i18n: "extinct" is a keyword
805 805 getargs(x, 0, 0, _("extinct takes no arguments"))
806 806 extincts = obsmod.getrevs(repo, 'extinct')
807 807 return subset & extincts
808 808
809 809 @predicate('extra(label, [value])', safe=True)
810 810 def extra(repo, subset, x):
811 811 """Changesets with the given label in the extra metadata, with the given
812 812 optional value.
813 813
814 814 Pattern matching is supported for `value`. See
815 815 :hg:`help revisions.patterns`.
816 816 """
817 817 args = getargsdict(x, 'extra', 'label value')
818 818 if 'label' not in args:
819 819 # i18n: "extra" is a keyword
820 820 raise error.ParseError(_('extra takes at least 1 argument'))
821 821 # i18n: "extra" is a keyword
822 822 label = getstring(args['label'], _('first argument to extra must be '
823 823 'a string'))
824 824 value = None
825 825
826 826 if 'value' in args:
827 827 # i18n: "extra" is a keyword
828 828 value = getstring(args['value'], _('second argument to extra must be '
829 829 'a string'))
830 830 kind, value, matcher = util.stringmatcher(value)
831 831
832 832 def _matchvalue(r):
833 833 extra = repo[r].extra()
834 834 return label in extra and (value is None or matcher(extra[label]))
835 835
836 836 return subset.filter(lambda r: _matchvalue(r),
837 837 condrepr=('<extra[%r] %r>', label, value))
838 838
839 839 @predicate('filelog(pattern)', safe=True)
840 840 def filelog(repo, subset, x):
841 841 """Changesets connected to the specified filelog.
842 842
843 843 For performance reasons, visits only revisions mentioned in the file-level
844 844 filelog, rather than filtering through all changesets (much faster, but
845 845 doesn't include deletes or duplicate changes). For a slower, more accurate
846 846 result, use ``file()``.
847 847
848 848 The pattern without explicit kind like ``glob:`` is expected to be
849 849 relative to the current directory and match against a file exactly
850 850 for efficiency.
851 851
852 852 If some linkrev points to revisions filtered by the current repoview, we'll
853 853 work around it to return a non-filtered value.
854 854 """
855 855
856 856 # i18n: "filelog" is a keyword
857 857 pat = getstring(x, _("filelog requires a pattern"))
858 858 s = set()
859 859 cl = repo.changelog
860 860
861 861 if not matchmod.patkind(pat):
862 862 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
863 863 files = [f]
864 864 else:
865 865 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
866 866 files = (f for f in repo[None] if m(f))
867 867
868 868 for f in files:
869 869 fl = repo.file(f)
870 870 known = {}
871 871 scanpos = 0
872 872 for fr in list(fl):
873 873 fn = fl.node(fr)
874 874 if fn in known:
875 875 s.add(known[fn])
876 876 continue
877 877
878 878 lr = fl.linkrev(fr)
879 879 if lr in cl:
880 880 s.add(lr)
881 881 elif scanpos is not None:
882 882 # lowest matching changeset is filtered, scan further
883 883 # ahead in changelog
884 884 start = max(lr, scanpos) + 1
885 885 scanpos = None
886 886 for r in cl.revs(start):
887 887 # minimize parsing of non-matching entries
888 888 if f in cl.revision(r) and f in cl.readfiles(r):
889 889 try:
890 890 # try to use manifest delta fastpath
891 891 n = repo[r].filenode(f)
892 892 if n not in known:
893 893 if n == fn:
894 894 s.add(r)
895 895 scanpos = r
896 896 break
897 897 else:
898 898 known[n] = r
899 899 except error.ManifestLookupError:
900 900 # deletion in changelog
901 901 continue
902 902
903 903 return subset & s
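# Illustrative usage (a sketch, not part of this module): filelog() trades
# accuracy for speed compared to file(), assuming a loaded ``repo``:
#   repo.revs("filelog('mercurial/revset.py')")    # fast, may miss deletes
#   repo.revs("file('mercurial/revset.py')")       # slower, complete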
904 904
905 905 @predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
906 906 def first(repo, subset, x, order):
907 907 """An alias for limit().
908 908 """
909 909 return limit(repo, subset, x, order)
910 910
911 911 def _follow(repo, subset, x, name, followfirst=False):
912 912 args = getargsdict(x, name, 'file startrev')
913 913 revs = None
914 914 if 'startrev' in args:
915 915 revs = getset(repo, fullreposet(repo), args['startrev'])
916 916 if 'file' in args:
917 917 x = getstring(args['file'], _("%s expected a pattern") % name)
918 918 if revs is None:
919 919 revs = [None]
920 920 fctxs = []
921 921 for r in revs:
922 922 ctx = mctx = repo[r]
923 923 if r is None:
924 924 ctx = repo['.']
925 925 m = matchmod.match(repo.root, repo.getcwd(), [x],
926 926 ctx=mctx, default='path')
927 927 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
928 928 s = dagop.filerevancestors(fctxs, followfirst)
929 929 else:
930 930 if revs is None:
931 931 revs = baseset([repo['.'].rev()])
932 932 s = dagop.revancestors(repo, revs, followfirst)
933 933
934 934 return subset & s
935 935
936 936 @predicate('follow([file[, startrev]])', safe=True)
937 937 def follow(repo, subset, x):
938 938 """
939 939 An alias for ``::.`` (ancestors of the working directory's first parent).
940 940 If a file pattern is specified, the histories of files matching the given
941 941 pattern in the revision given by startrev are followed, including copies.
942 942 """
943 943 return _follow(repo, subset, x, 'follow')
944 944
945 945 @predicate('_followfirst', safe=True)
946 946 def _followfirst(repo, subset, x):
947 947 # ``followfirst([file[, startrev]])``
948 948 # Like ``follow([file[, startrev]])`` but follows only the first parent
949 949 # of every revision or file revision.
950 950 return _follow(repo, subset, x, '_followfirst', followfirst=True)
951 951
952 952 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
953 953 safe=True)
954 954 def followlines(repo, subset, x):
955 955 """Changesets modifying `file` in line range ('fromline', 'toline').
956 956
957 957 The line range corresponds to 'file' content at 'startrev' and should hence
958 958 be consistent with the file size. If startrev is not specified, the working
959 959 directory's parent is used.
960 960
961 961 By default, ancestors of 'startrev' are returned. If 'descend' is True,
962 962 descendants of 'startrev' are returned, though renames are (currently) not
963 963 followed in this direction.
964 964 """
965 965 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
966 966 if len(args['lines']) != 1:
967 967 raise error.ParseError(_("followlines requires a line range"))
968 968
969 969 rev = '.'
970 970 if 'startrev' in args:
971 971 revs = getset(repo, fullreposet(repo), args['startrev'])
972 972 if len(revs) != 1:
973 973 raise error.ParseError(
974 974 # i18n: "followlines" is a keyword
975 975 _("followlines expects exactly one revision"))
976 976 rev = revs.last()
977 977
978 978 pat = getstring(args['file'], _("followlines requires a pattern"))
979 979 # i18n: "followlines" is a keyword
980 980 msg = _("followlines expects exactly one file")
981 981 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
982 982 # i18n: "followlines" is a keyword
983 983 lr = getrange(args['lines'][0], _("followlines expects a line range"))
984 984 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
985 985 for a in lr]
986 986 fromline, toline = util.processlinerange(fromline, toline)
987 987
988 988 fctx = repo[rev].filectx(fname)
989 989 descend = False
990 990 if 'descend' in args:
991 991 descend = getboolean(args['descend'],
992 992 # i18n: "descend" is a keyword
993 993 _("descend argument must be a boolean"))
994 994 if descend:
995 995 rs = generatorset(
996 996 (c.rev() for c, _linerange
997 997 in dagop.blockdescendants(fctx, fromline, toline)),
998 998 iterasc=True)
999 999 else:
1000 1000 rs = generatorset(
1001 1001 (c.rev() for c, _linerange
1002 1002 in dagop.blockancestors(fctx, fromline, toline)),
1003 1003 iterasc=False)
1004 1004 return subset & rs
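# Illustrative usage (a sketch, not part of this module), assuming the file
# and line range exist at the given startrev in ``repo``:
#   repo.revs("followlines(mercurial/revset.py, 10:20)")
#   repo.revs("followlines(mercurial/revset.py, 10:20, startrev=tip, descend=True)")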
1005 1005
1006 1006 @predicate('all()', safe=True)
1007 1007 def getall(repo, subset, x):
1008 1008 """All changesets, the same as ``0:tip``.
1009 1009 """
1010 1010 # i18n: "all" is a keyword
1011 1011 getargs(x, 0, 0, _("all takes no arguments"))
1012 1012 return subset & spanset(repo) # drop "null" if any
1013 1013
1014 1014 @predicate('grep(regex)', weight=10)
1015 1015 def grep(repo, subset, x):
1016 1016 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1017 1017 to ensure special escape characters are handled correctly. Unlike
1018 1018 ``keyword(string)``, the match is case-sensitive.
1019 1019 """
1020 1020 try:
1021 1021 # i18n: "grep" is a keyword
1022 1022 gr = re.compile(getstring(x, _("grep requires a string")))
1023 1023 except re.error as e:
1024 1024 raise error.ParseError(_('invalid match pattern: %s') % e)
1025 1025
1026 1026 def matches(x):
1027 1027 c = repo[x]
1028 1028 for e in c.files() + [c.user(), c.description()]:
1029 1029 if gr.search(e):
1030 1030 return True
1031 1031 return False
1032 1032
1033 1033 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1034 1034
1035 1035 @predicate('_matchfiles', safe=True)
1036 1036 def _matchfiles(repo, subset, x):
1037 1037 # _matchfiles takes a revset list of prefixed arguments:
1038 1038 #
1039 1039 # [p:foo, i:bar, x:baz]
1040 1040 #
1041 1041 # builds a match object from them and filters subset. Allowed
1042 1042 # prefixes are 'p:' for regular patterns, 'i:' for include
1043 1043 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1044 1044 # a revision identifier, or the empty string to reference the
1045 1045 # working directory, from which the match object is
1046 1046 # initialized. Use 'd:' to set the default matching mode, defaulting
1047 1047 # to 'glob'. At most one 'r:' and one 'd:' argument can be passed.
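#
# For instance (an illustrative sketch; the pattern, include and revision
# values are made up), a direct call with one pattern, one include and a
# revision would look like:
#
#   _matchfiles(repo, subset, ('string', 'p:glob:*.py'),
#               ('string', 'i:mercurial/'), ('string', 'r:tip'))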
1048 1048
1049 1049 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1050 1050 pats, inc, exc = [], [], []
1051 1051 rev, default = None, None
1052 1052 for arg in l:
1053 1053 s = getstring(arg, "_matchfiles requires string arguments")
1054 1054 prefix, value = s[:2], s[2:]
1055 1055 if prefix == 'p:':
1056 1056 pats.append(value)
1057 1057 elif prefix == 'i:':
1058 1058 inc.append(value)
1059 1059 elif prefix == 'x:':
1060 1060 exc.append(value)
1061 1061 elif prefix == 'r:':
1062 1062 if rev is not None:
1063 1063 raise error.ParseError('_matchfiles expected at most one '
1064 1064 'revision')
1065 1065 if value != '': # empty means working directory; leave rev as None
1066 1066 rev = value
1067 1067 elif prefix == 'd:':
1068 1068 if default is not None:
1069 1069 raise error.ParseError('_matchfiles expected at most one '
1070 1070 'default mode')
1071 1071 default = value
1072 1072 else:
1073 1073 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1074 1074 if not default:
1075 1075 default = 'glob'
1076 1076
1077 1077 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1078 1078 exclude=exc, ctx=repo[rev], default=default)
1079 1079
1080 1080 # This directly read the changelog data as creating changectx for all
1081 1081 # revisions is quite expensive.
1082 1082 getfiles = repo.changelog.readfiles
1083 1083 wdirrev = node.wdirrev
1084 1084 def matches(x):
1085 1085 if x == wdirrev:
1086 1086 files = repo[x].files()
1087 1087 else:
1088 1088 files = getfiles(x)
1089 1089 for f in files:
1090 1090 if m(f):
1091 1091 return True
1092 1092 return False
1093 1093
1094 1094 return subset.filter(matches,
1095 1095 condrepr=('<matchfiles patterns=%r, include=%r '
1096 1096 'exclude=%r, default=%r, rev=%r>',
1097 1097 pats, inc, exc, default, rev))
1098 1098
1099 1099 @predicate('file(pattern)', safe=True, weight=10)
1100 1100 def hasfile(repo, subset, x):
1101 1101 """Changesets affecting files matched by pattern.
1102 1102
1103 1103 For a faster but less accurate result, consider using ``filelog()``
1104 1104 instead.
1105 1105
1106 1106 This predicate uses ``glob:`` as the default kind of pattern.
1107 1107 """
1108 1108 # i18n: "file" is a keyword
1109 1109 pat = getstring(x, _("file requires a pattern"))
1110 1110 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1111 1111
1112 1112 @predicate('head()', safe=True)
1113 1113 def head(repo, subset, x):
1114 1114 """Changeset is a named branch head.
1115 1115 """
1116 1116 # i18n: "head" is a keyword
1117 1117 getargs(x, 0, 0, _("head takes no arguments"))
1118 1118 hs = set()
1119 1119 cl = repo.changelog
1120 1120 for ls in repo.branchmap().itervalues():
1121 1121 hs.update(cl.rev(h) for h in ls)
1122 1122 return subset & baseset(hs)
1123 1123
1124 1124 @predicate('heads(set)', safe=True)
1125 1125 def heads(repo, subset, x):
1126 1126 """Members of set with no children in set.
1127 1127 """
1128 1128 s = getset(repo, subset, x)
1129 1129 ps = parents(repo, subset, x)
1130 1130 return s - ps
1131 1131
1132 1132 @predicate('hidden()', safe=True)
1133 1133 def hidden(repo, subset, x):
1134 1134 """Hidden changesets.
1135 1135 """
1136 1136 # i18n: "hidden" is a keyword
1137 1137 getargs(x, 0, 0, _("hidden takes no arguments"))
1138 1138 hiddenrevs = repoview.filterrevs(repo, 'visible')
1139 1139 return subset & hiddenrevs
1140 1140
1141 1141 @predicate('keyword(string)', safe=True, weight=10)
1142 1142 def keyword(repo, subset, x):
1143 1143 """Search commit message, user name, and names of changed files for
1144 1144 string. The match is case-insensitive.
1145 1145
1146 1146 For a regular expression or case-sensitive search of these fields, use
1147 1147 ``grep(regex)``.
1148 1148 """
1149 1149 # i18n: "keyword" is a keyword
1150 1150 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1151 1151
1152 1152 def matches(r):
1153 1153 c = repo[r]
1154 1154 return any(kw in encoding.lower(t)
1155 1155 for t in c.files() + [c.user(), c.description()])
1156 1156
1157 1157 return subset.filter(matches, condrepr=('<keyword %r>', kw))
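# Illustrative usage (a sketch, not part of this module), assuming a loaded
# ``repo``:
#   repo.revs("keyword('typo')")         # case-insensitive substring match
#   repo.revs(r"grep('^perf:')")         # regex, case-sensitive (grep() above)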
1158 1158
1159 1159 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1160 1160 def limit(repo, subset, x, order):
1161 1161 """First n members of set, defaulting to 1, starting from offset.
1162 1162 """
1163 1163 args = getargsdict(x, 'limit', 'set n offset')
1164 1164 if 'set' not in args:
1165 1165 # i18n: "limit" is a keyword
1166 1166 raise error.ParseError(_("limit requires one to three arguments"))
1167 1167 # i18n: "limit" is a keyword
1168 1168 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1169 1169 if lim < 0:
1170 1170 raise error.ParseError(_("negative number to select"))
1171 1171 # i18n: "limit" is a keyword
1172 1172 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1173 1173 if ofs < 0:
1174 1174 raise error.ParseError(_("negative offset"))
1175 1175 os = getset(repo, fullreposet(repo), args['set'])
1176 1176 ls = os.slice(ofs, ofs + lim)
1177 1177 if order == followorder and lim > 1:
1178 1178 return subset & ls
1179 1179 return ls & subset
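# Illustrative usage (a sketch, not part of this module), assuming a loaded
# ``repo`` with a 'default' branch:
#   repo.revs("limit(branch(default), 3)")       # first 3 members
#   repo.revs("limit(branch(default), 3, 10)")   # 3 members, starting at offset 10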
1180 1180
1181 1181 @predicate('last(set, [n])', safe=True, takeorder=True)
1182 1182 def last(repo, subset, x, order):
1183 1183 """Last n members of set, defaulting to 1.
1184 1184 """
1185 1185 # i18n: "last" is a keyword
1186 1186 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1187 1187 lim = 1
1188 1188 if len(l) == 2:
1189 1189 # i18n: "last" is a keyword
1190 1190 lim = getinteger(l[1], _("last expects a number"))
1191 1191 if lim < 0:
1192 1192 raise error.ParseError(_("negative number to select"))
1193 1193 os = getset(repo, fullreposet(repo), l[0])
1194 1194 os.reverse()
1195 1195 ls = os.slice(0, lim)
1196 1196 if order == followorder and lim > 1:
1197 1197 return subset & ls
1198 1198 ls.reverse()
1199 1199 return ls & subset
1200 1200
1201 1201 @predicate('max(set)', safe=True)
1202 1202 def maxrev(repo, subset, x):
1203 1203 """Changeset with highest revision number in set.
1204 1204 """
1205 1205 os = getset(repo, fullreposet(repo), x)
1206 1206 try:
1207 1207 m = os.max()
1208 1208 if m in subset:
1209 1209 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1210 1210 except ValueError:
1211 1211 # os.max() throws a ValueError when the collection is empty.
1212 1212 # Same as python's max().
1213 1213 pass
1214 1214 return baseset(datarepr=('<max %r, %r>', subset, os))
1215 1215
1216 1216 @predicate('merge()', safe=True)
1217 1217 def merge(repo, subset, x):
1218 1218 """Changeset is a merge changeset.
1219 1219 """
1220 1220 # i18n: "merge" is a keyword
1221 1221 getargs(x, 0, 0, _("merge takes no arguments"))
1222 1222 cl = repo.changelog
1223 1223 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1224 1224 condrepr='<merge>')
1225 1225
1226 1226 @predicate('branchpoint()', safe=True)
1227 1227 def branchpoint(repo, subset, x):
1228 1228 """Changesets with more than one child.
1229 1229 """
1230 1230 # i18n: "branchpoint" is a keyword
1231 1231 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1232 1232 cl = repo.changelog
1233 1233 if not subset:
1234 1234 return baseset()
1235 1235 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1236 1236 # (and if it is not, it should.)
1237 1237 baserev = min(subset)
1238 1238 parentscount = [0]*(len(repo) - baserev)
1239 1239 for r in cl.revs(start=baserev + 1):
1240 1240 for p in cl.parentrevs(r):
1241 1241 if p >= baserev:
1242 1242 parentscount[p - baserev] += 1
1243 1243 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1244 1244 condrepr='<branchpoint>')
1245 1245
1246 1246 @predicate('min(set)', safe=True)
1247 1247 def minrev(repo, subset, x):
1248 1248 """Changeset with lowest revision number in set.
1249 1249 """
1250 1250 os = getset(repo, fullreposet(repo), x)
1251 1251 try:
1252 1252 m = os.min()
1253 1253 if m in subset:
1254 1254 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1255 1255 except ValueError:
1256 1256 # os.min() throws a ValueError when the collection is empty.
1257 1257 # Same as python's min().
1258 1258 pass
1259 1259 return baseset(datarepr=('<min %r, %r>', subset, os))
1260 1260
1261 1261 @predicate('modifies(pattern)', safe=True, weight=30)
1262 1262 def modifies(repo, subset, x):
1263 1263 """Changesets modifying files matched by pattern.
1264 1264
1265 1265 The pattern without explicit kind like ``glob:`` is expected to be
1266 1266 relative to the current directory and match against a file or a
1267 1267 directory.
1268 1268 """
1269 1269 # i18n: "modifies" is a keyword
1270 1270 pat = getstring(x, _("modifies requires a pattern"))
1271 1271 return checkstatus(repo, subset, pat, 0)
1272 1272
1273 1273 @predicate('named(namespace)')
1274 1274 def named(repo, subset, x):
1275 1275 """The changesets in a given namespace.
1276 1276
1277 1277 Pattern matching is supported for `namespace`. See
1278 1278 :hg:`help revisions.patterns`.
1279 1279 """
1280 1280 # i18n: "named" is a keyword
1281 1281 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1282 1282
1283 1283 ns = getstring(args[0],
1284 1284 # i18n: "named" is a keyword
1285 1285 _('the argument to named must be a string'))
1286 1286 kind, pattern, matcher = util.stringmatcher(ns)
1287 1287 namespaces = set()
1288 1288 if kind == 'literal':
1289 1289 if pattern not in repo.names:
1290 1290 raise error.RepoLookupError(_("namespace '%s' does not exist")
1291 1291 % ns)
1292 1292 namespaces.add(repo.names[pattern])
1293 1293 else:
1294 1294 for name, ns in repo.names.iteritems():
1295 1295 if matcher(name):
1296 1296 namespaces.add(ns)
1297 1297 if not namespaces:
1298 1298 raise error.RepoLookupError(_("no namespace exists"
1299 1299 " that match '%s'") % pattern)
1300 1300
1301 1301 names = set()
1302 1302 for ns in namespaces:
1303 1303 for name in ns.listnames(repo):
1304 1304 if name not in ns.deprecated:
1305 1305 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1306 1306
1307 1307 names -= {node.nullrev}
1308 1308 return subset & names
1309 1309
1310 1310 @predicate('id(string)', safe=True)
1311 1311 def node_(repo, subset, x):
1312 1312 """Revision non-ambiguously specified by the given hex string prefix.
1313 1313 """
1314 1314 # i18n: "id" is a keyword
1315 1315 l = getargs(x, 1, 1, _("id requires one argument"))
1316 1316 # i18n: "id" is a keyword
1317 1317 n = getstring(l[0], _("id requires a string"))
1318 1318 if len(n) == 40:
1319 1319 try:
1320 1320 rn = repo.changelog.rev(node.bin(n))
1321 1321 except error.WdirUnsupported:
1322 1322 rn = node.wdirrev
1323 1323 except (LookupError, TypeError):
1324 1324 rn = None
1325 1325 else:
1326 1326 rn = None
1327 1327 try:
1328 1328 pm = repo.changelog._partialmatch(n)
1329 1329 if pm is not None:
1330 1330 rn = repo.changelog.rev(pm)
1331 1331 except error.WdirUnsupported:
1332 1332 rn = node.wdirrev
1333 1333
1334 1334 if rn is None:
1335 1335 return baseset()
1336 1336 result = baseset([rn])
1337 1337 return result & subset
1338 1338
1339 1339 @predicate('obsolete()', safe=True)
1340 1340 def obsolete(repo, subset, x):
1341 1341 """Mutable changeset with a newer version."""
1342 1342 # i18n: "obsolete" is a keyword
1343 1343 getargs(x, 0, 0, _("obsolete takes no arguments"))
1344 1344 obsoletes = obsmod.getrevs(repo, 'obsolete')
1345 1345 return subset & obsoletes
1346 1346
1347 1347 @predicate('only(set, [set])', safe=True)
1348 1348 def only(repo, subset, x):
1349 1349 """Changesets that are ancestors of the first set that are not ancestors
1350 1350 of any other head in the repo. If a second set is specified, the result
1351 1351 is ancestors of the first set that are not ancestors of the second set
1352 1352 (i.e. ::<set1> - ::<set2>).
1353 1353 """
1354 1354 cl = repo.changelog
1355 1355 # i18n: "only" is a keyword
1356 1356 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1357 1357 include = getset(repo, fullreposet(repo), args[0])
1358 1358 if len(args) == 1:
1359 1359 if not include:
1360 1360 return baseset()
1361 1361
1362 1362 descendants = set(dagop.revdescendants(repo, include, False))
1363 1363 exclude = [rev for rev in cl.headrevs()
1364 1364 if rev not in descendants and rev not in include]
1365 1365 else:
1366 1366 exclude = getset(repo, fullreposet(repo), args[1])
1367 1367
1368 1368 results = set(cl.findmissingrevs(common=exclude, heads=include))
1369 1369 # XXX we should turn this into a baseset instead of a set, smartset may do
1370 1370 # some optimizations from the fact this is a baseset.
1371 1371 return subset & results
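# Illustrative usage (a sketch, not part of this module; 'feature' and
# 'default' are made-up head names), assuming a loaded ``repo``:
#   repo.revs("only(feature)")           # ::feature minus ancestors of other heads
#   repo.revs("only(feature, default)")  # equivalent to ::feature - ::default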
1372 1372
1373 1373 @predicate('origin([set])', safe=True)
1374 1374 def origin(repo, subset, x):
1375 1375 """
1376 1376 Changesets that were specified as a source for the grafts, transplants or
1377 1377 rebases that created the given revisions. Omitting the optional set is the
1378 1378 same as passing all(). If a changeset created by these operations is itself
1379 1379 specified as a source for one of these operations, only the source changeset
1380 1380 for the first operation is selected.
1381 1381 """
1382 1382 if x is not None:
1383 1383 dests = getset(repo, fullreposet(repo), x)
1384 1384 else:
1385 1385 dests = fullreposet(repo)
1386 1386
1387 1387 def _firstsrc(rev):
1388 1388 src = _getrevsource(repo, rev)
1389 1389 if src is None:
1390 1390 return None
1391 1391
1392 1392 while True:
1393 1393 prev = _getrevsource(repo, src)
1394 1394
1395 1395 if prev is None:
1396 1396 return src
1397 1397 src = prev
1398 1398
1399 1399 o = {_firstsrc(r) for r in dests}
1400 1400 o -= {None}
1401 1401 # XXX we should turn this into a baseset instead of a set, smartset may do
1402 1402 # some optimizations from the fact this is a baseset.
1403 1403 return subset & o
1404 1404
1405 1405 @predicate('outgoing([path])', safe=False, weight=10)
1406 1406 def outgoing(repo, subset, x):
1407 1407 """Changesets not found in the specified destination repository, or the
1408 1408 default push location.
1409 1409 """
1410 1410 # Avoid cycles.
1411 1411 from . import (
1412 1412 discovery,
1413 1413 hg,
1414 1414 )
1415 1415 # i18n: "outgoing" is a keyword
1416 1416 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1417 1417 # i18n: "outgoing" is a keyword
1418 1418 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1419 1419 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1420 1420 dest, branches = hg.parseurl(dest)
1421 1421 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1422 1422 if revs:
1423 1423 revs = [repo.lookup(rev) for rev in revs]
1424 1424 other = hg.peer(repo, {}, dest)
1425 1425 repo.ui.pushbuffer()
1426 1426 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1427 1427 repo.ui.popbuffer()
1428 1428 cl = repo.changelog
1429 1429 o = {cl.rev(r) for r in outgoing.missing}
1430 1430 return subset & o
1431 1431
1432 1432 @predicate('p1([set])', safe=True)
1433 1433 def p1(repo, subset, x):
1434 1434 """First parent of changesets in set, or the working directory.
1435 1435 """
1436 1436 if x is None:
1437 1437 p = repo[x].p1().rev()
1438 1438 if p >= 0:
1439 1439 return subset & baseset([p])
1440 1440 return baseset()
1441 1441
1442 1442 ps = set()
1443 1443 cl = repo.changelog
1444 1444 for r in getset(repo, fullreposet(repo), x):
1445 1445 try:
1446 1446 ps.add(cl.parentrevs(r)[0])
1447 1447 except error.WdirUnsupported:
1448 1448 ps.add(repo[r].parents()[0].rev())
1449 1449 ps -= {node.nullrev}
1450 1450 # XXX we should turn this into a baseset instead of a set, smartset may do
1451 1451 # some optimizations from the fact this is a baseset.
1452 1452 return subset & ps
1453 1453
1454 1454 @predicate('p2([set])', safe=True)
1455 1455 def p2(repo, subset, x):
1456 1456 """Second parent of changesets in set, or the working directory.
1457 1457 """
1458 1458 if x is None:
1459 1459 ps = repo[x].parents()
1460 1460 try:
1461 1461 p = ps[1].rev()
1462 1462 if p >= 0:
1463 1463 return subset & baseset([p])
1464 1464 return baseset()
1465 1465 except IndexError:
1466 1466 return baseset()
1467 1467
1468 1468 ps = set()
1469 1469 cl = repo.changelog
1470 1470 for r in getset(repo, fullreposet(repo), x):
1471 1471 try:
1472 1472 ps.add(cl.parentrevs(r)[1])
1473 1473 except error.WdirUnsupported:
1474 1474 parents = repo[r].parents()
1475 1475 if len(parents) == 2:
1476 1476 ps.add(parents[1].rev())
1477 1477 ps -= {node.nullrev}
1478 1478 # XXX we should turn this into a baseset instead of a set, smartset may do
1479 1479 # some optimizations from the fact this is a baseset.
1480 1480 return subset & ps
1481 1481
1482 1482 def parentpost(repo, subset, x, order):
1483 1483 return p1(repo, subset, x)
1484 1484
1485 1485 @predicate('parents([set])', safe=True)
1486 1486 def parents(repo, subset, x):
1487 1487 """
1488 1488 The set of all parents for all changesets in set, or the working directory.
1489 1489 """
1490 1490 if x is None:
1491 1491 ps = set(p.rev() for p in repo[x].parents())
1492 1492 else:
1493 1493 ps = set()
1494 1494 cl = repo.changelog
1495 1495 up = ps.update
1496 1496 parentrevs = cl.parentrevs
1497 1497 for r in getset(repo, fullreposet(repo), x):
1498 1498 try:
1499 1499 up(parentrevs(r))
1500 1500 except error.WdirUnsupported:
1501 1501 up(p.rev() for p in repo[r].parents())
1502 1502 ps -= {node.nullrev}
1503 1503 return subset & ps
1504 1504
1505 1505 def _phase(repo, subset, *targets):
1506 1506 """helper to select all rev in <targets> phases"""
1507 s = repo._phasecache.getrevset(repo, targets)
1508 return subset & s
1507 return repo._phasecache.getrevset(repo, targets, subset)
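# Note (a sketch of the intent, not a statement from this changeset): passing
# ``subset`` through to phasecache.getrevset presumably lets the phase cache
# restrict its work to the revisions of interest. draft(), secret(),
# _notpublic() and public() below all route through this helper, e.g.
#   _phase(repo, subset, phases.draft, phases.secret)   # == _notpublic()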
1509 1508
1510 1509 @predicate('draft()', safe=True)
1511 1510 def draft(repo, subset, x):
1512 1511 """Changeset in draft phase."""
1513 1512 # i18n: "draft" is a keyword
1514 1513 getargs(x, 0, 0, _("draft takes no arguments"))
1515 1514 target = phases.draft
1516 1515 return _phase(repo, subset, target)
1517 1516
1518 1517 @predicate('secret()', safe=True)
1519 1518 def secret(repo, subset, x):
1520 1519 """Changeset in secret phase."""
1521 1520 # i18n: "secret" is a keyword
1522 1521 getargs(x, 0, 0, _("secret takes no arguments"))
1523 1522 target = phases.secret
1524 1523 return _phase(repo, subset, target)
1525 1524
1526 1525 def parentspec(repo, subset, x, n, order):
1527 1526 """``set^0``
1528 1527 The set.
1529 1528 ``set^1`` (or ``set^``), ``set^2``
1530 1529 First or second parent, respectively, of all changesets in set.
1531 1530 """
1532 1531 try:
1533 1532 n = int(n[1])
1534 1533 if n not in (0, 1, 2):
1535 1534 raise ValueError
1536 1535 except (TypeError, ValueError):
1537 1536 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1538 1537 ps = set()
1539 1538 cl = repo.changelog
1540 1539 for r in getset(repo, fullreposet(repo), x):
1541 1540 if n == 0:
1542 1541 ps.add(r)
1543 1542 elif n == 1:
1544 1543 try:
1545 1544 ps.add(cl.parentrevs(r)[0])
1546 1545 except error.WdirUnsupported:
1547 1546 ps.add(repo[r].parents()[0].rev())
1548 1547 else:
1549 1548 try:
1550 1549 parents = cl.parentrevs(r)
1551 1550 if parents[1] != node.nullrev:
1552 1551 ps.add(parents[1])
1553 1552 except error.WdirUnsupported:
1554 1553 parents = repo[r].parents()
1555 1554 if len(parents) == 2:
1556 1555 ps.add(parents[1].rev())
1557 1556 return subset & ps
1558 1557
1559 1558 @predicate('present(set)', safe=True, takeorder=True)
1560 1559 def present(repo, subset, x, order):
1561 1560 """An empty set, if any revision in set isn't found; otherwise,
1562 1561 all revisions in set.
1563 1562
1564 1563 If any of the specified revisions is not present in the local repository,
1565 1564 the query is normally aborted. But this predicate allows the query
1566 1565 to continue even in such cases.
1567 1566 """
1568 1567 try:
1569 1568 return getset(repo, subset, x, order)
1570 1569 except error.RepoLookupError:
1571 1570 return baseset()
1572 1571
1573 1572 # for internal use
1574 1573 @predicate('_notpublic', safe=True)
1575 1574 def _notpublic(repo, subset, x):
1576 1575 getargs(x, 0, 0, "_notpublic takes no arguments")
1577 1576 return _phase(repo, subset, phases.draft, phases.secret)
1578 1577
1579 1578 # for internal use
1580 1579 @predicate('_phaseandancestors(phasename, set)', safe=True)
1581 1580 def _phaseandancestors(repo, subset, x):
1582 1581 # equivalent to (phasename() & ancestors(set)) but more efficient
1583 1582 # phasename could be one of 'draft', 'secret', or '_notpublic'
1584 1583 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1585 1584 phasename = getsymbol(args[0])
1586 1585 s = getset(repo, fullreposet(repo), args[1])
1587 1586
1588 1587 draft = phases.draft
1589 1588 secret = phases.secret
1590 1589 phasenamemap = {
1591 1590 '_notpublic': draft,
1592 1591 'draft': draft, # follow secret's ancestors
1593 1592 'secret': secret,
1594 1593 }
1595 1594 if phasename not in phasenamemap:
1596 1595 raise error.ParseError('%r is not a valid phasename' % phasename)
1597 1596
1598 1597 minimalphase = phasenamemap[phasename]
1599 1598 getphase = repo._phasecache.phase
1600 1599
1601 1600 def cutfunc(rev):
1602 1601 return getphase(repo, rev) < minimalphase
1603 1602
1604 1603 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1605 1604
1606 1605 if phasename == 'draft': # need to remove secret changesets
1607 1606 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1608 1607 return subset & revs
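# Illustrative rewrite (a sketch; not asserted by this module): the revset
# optimizer is expected to turn an expression such as
#   draft() & ::x
# into
#   _phaseandancestors(draft, x)
# so the ancestor walk can be cut at the first public revision (cutfunc above).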
1609 1608
1610 1609 @predicate('public()', safe=True)
1611 1610 def public(repo, subset, x):
1612 1611 """Changeset in public phase."""
1613 1612 # i18n: "public" is a keyword
1614 1613 getargs(x, 0, 0, _("public takes no arguments"))
1615 phase = repo._phasecache.phase
1616 target = phases.public
1617 condition = lambda r: phase(repo, r) == target
1618 return subset.filter(condition, condrepr=('<phase %r>', target),
1619 cache=False)
1614 return _phase(repo, subset, phases.public)
1620 1615
1621 1616 @predicate('remote([id [,path]])', safe=False)
1622 1617 def remote(repo, subset, x):
1623 1618 """Local revision that corresponds to the given identifier in a
1624 1619 remote repository, if present. Here, the '.' identifier is a
1625 1620 synonym for the current local branch.
1626 1621 """
1627 1622
1628 1623 from . import hg # avoid start-up nasties
1629 1624 # i18n: "remote" is a keyword
1630 1625 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1631 1626
1632 1627 q = '.'
1633 1628 if len(l) > 0:
1634 1629 # i18n: "remote" is a keyword
1635 1630 q = getstring(l[0], _("remote requires a string id"))
1636 1631 if q == '.':
1637 1632 q = repo['.'].branch()
1638 1633
1639 1634 dest = ''
1640 1635 if len(l) > 1:
1641 1636 # i18n: "remote" is a keyword
1642 1637 dest = getstring(l[1], _("remote requires a repository path"))
1643 1638 dest = repo.ui.expandpath(dest or 'default')
1644 1639 dest, branches = hg.parseurl(dest)
1645 1640 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1646 1641 if revs:
1647 1642 revs = [repo.lookup(rev) for rev in revs]
1648 1643 other = hg.peer(repo, {}, dest)
1649 1644 n = other.lookup(q)
1650 1645 if n in repo:
1651 1646 r = repo[n].rev()
1652 1647 if r in subset:
1653 1648 return baseset([r])
1654 1649 return baseset()
1655 1650
1656 1651 @predicate('removes(pattern)', safe=True, weight=30)
1657 1652 def removes(repo, subset, x):
1658 1653 """Changesets which remove files matching pattern.
1659 1654
1660 1655 The pattern without explicit kind like ``glob:`` is expected to be
1661 1656 relative to the current directory and match against a file or a
1662 1657 directory.
1663 1658 """
1664 1659 # i18n: "removes" is a keyword
1665 1660 pat = getstring(x, _("removes requires a pattern"))
1666 1661 return checkstatus(repo, subset, pat, 2)
1667 1662
1668 1663 @predicate('rev(number)', safe=True)
1669 1664 def rev(repo, subset, x):
1670 1665 """Revision with the given numeric identifier.
1671 1666 """
1672 1667 # i18n: "rev" is a keyword
1673 1668 l = getargs(x, 1, 1, _("rev requires one argument"))
1674 1669 try:
1675 1670 # i18n: "rev" is a keyword
1676 1671 l = int(getstring(l[0], _("rev requires a number")))
1677 1672 except (TypeError, ValueError):
1678 1673 # i18n: "rev" is a keyword
1679 1674 raise error.ParseError(_("rev expects a number"))
1680 1675 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1681 1676 return baseset()
1682 1677 return subset & baseset([l])
1683 1678
1684 1679 @predicate('matching(revision [, field])', safe=True)
1685 1680 def matching(repo, subset, x):
1686 1681 """Changesets in which a given set of fields match the set of fields in the
1687 1682 selected revision or set.
1688 1683
1689 1684 To match more than one field pass the list of fields to match separated
1690 1685 by spaces (e.g. ``author description``).
1691 1686
1692 1687 Valid fields are most regular revision fields and some special fields.
1693 1688
1694 1689 Regular revision fields are ``description``, ``author``, ``branch``,
1695 1690 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1696 1691 and ``diff``.
1697 1692 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1698 1693 contents of the revision. Two revisions matching their ``diff`` will
1699 1694 also match their ``files``.
1700 1695
1701 1696 Special fields are ``summary`` and ``metadata``:
1702 1697 ``summary`` matches the first line of the description.
1703 1698 ``metadata`` is equivalent to matching ``description user date``
1704 1699 (i.e. it matches the main metadata fields).
1705 1700
1706 1701 ``metadata`` is the default field which is used when no fields are
1707 1702 specified. You can match more than one field at a time.
1708 1703 """
1709 1704 # i18n: "matching" is a keyword
1710 1705 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1711 1706
1712 1707 revs = getset(repo, fullreposet(repo), l[0])
1713 1708
1714 1709 fieldlist = ['metadata']
1715 1710 if len(l) > 1:
1716 1711 fieldlist = getstring(l[1],
1717 1712 # i18n: "matching" is a keyword
1718 1713 _("matching requires a string "
1719 1714 "as its second argument")).split()
1720 1715
1721 1716 # Make sure that there are no repeated fields,
1722 1717 # expand the 'special' 'metadata' field type
1723 1718 # and check the 'files' whenever we check the 'diff'
1724 1719 fields = []
1725 1720 for field in fieldlist:
1726 1721 if field == 'metadata':
1727 1722 fields += ['user', 'description', 'date']
1728 1723 elif field == 'diff':
1729 1724 # a revision matching the diff must also match the files
1730 1725 # since matching the diff is very costly, make sure to
1731 1726 # also match the files first
1732 1727 fields += ['files', 'diff']
1733 1728 else:
1734 1729 if field == 'author':
1735 1730 field = 'user'
1736 1731 fields.append(field)
1737 1732 fields = set(fields)
1738 1733 if 'summary' in fields and 'description' in fields:
1739 1734 # If a revision matches its description it also matches its summary
1740 1735 fields.discard('summary')
1741 1736
1742 1737 # We may want to match more than one field
1743 1738 # Not all fields take the same amount of time to be matched
1744 1739 # Sort the selected fields in order of increasing matching cost
1745 1740 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1746 1741 'files', 'description', 'substate', 'diff']
1747 1742 def fieldkeyfunc(f):
1748 1743 try:
1749 1744 return fieldorder.index(f)
1750 1745 except ValueError:
1751 1746 # assume an unknown field is very costly
1752 1747 return len(fieldorder)
1753 1748 fields = list(fields)
1754 1749 fields.sort(key=fieldkeyfunc)
1755 1750
1756 1751 # Each field will be matched with its own "getfield" function
1757 1752 # which will be added to the getfieldfuncs array of functions
1758 1753 getfieldfuncs = []
1759 1754 _funcs = {
1760 1755 'user': lambda r: repo[r].user(),
1761 1756 'branch': lambda r: repo[r].branch(),
1762 1757 'date': lambda r: repo[r].date(),
1763 1758 'description': lambda r: repo[r].description(),
1764 1759 'files': lambda r: repo[r].files(),
1765 1760 'parents': lambda r: repo[r].parents(),
1766 1761 'phase': lambda r: repo[r].phase(),
1767 1762 'substate': lambda r: repo[r].substate,
1768 1763 'summary': lambda r: repo[r].description().splitlines()[0],
1769 1764 'diff': lambda r: list(repo[r].diff(git=True))
1770 1765 }
1771 1766 for info in fields:
1772 1767 getfield = _funcs.get(info, None)
1773 1768 if getfield is None:
1774 1769 raise error.ParseError(
1775 1770 # i18n: "matching" is a keyword
1776 1771 _("unexpected field name passed to matching: %s") % info)
1777 1772 getfieldfuncs.append(getfield)
1778 1773 # convert the getfield array of functions into a "getinfo" function
1779 1774 # which returns an array of field values (or a single value if there
1780 1775 # is only one field to match)
1781 1776 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1782 1777
1783 1778 def matches(x):
1784 1779 for rev in revs:
1785 1780 target = getinfo(rev)
1786 1781 match = True
1787 1782 for n, f in enumerate(getfieldfuncs):
1788 1783 if target[n] != f(x):
1789 1784 match = False
1790 1785 if match:
1791 1786 return True
1792 1787 return False
1793 1788
1794 1789 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
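# Illustrative usage (a sketch, not part of this module), assuming a loaded
# ``repo``:
#   repo.revs("matching(tip)")                   # default 'metadata' fields:
#                                                # user, description, date
#   repo.revs("matching(42, 'author branch')")   # match selected fields only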
1795 1790
1796 1791 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1797 1792 def reverse(repo, subset, x, order):
1798 1793 """Reverse order of set.
1799 1794 """
1800 1795 l = getset(repo, subset, x, order)
1801 1796 if order == defineorder:
1802 1797 l.reverse()
1803 1798 return l
1804 1799
1805 1800 @predicate('roots(set)', safe=True)
1806 1801 def roots(repo, subset, x):
1807 1802 """Changesets in set with no parent changeset in set.
1808 1803 """
1809 1804 s = getset(repo, fullreposet(repo), x)
1810 1805 parents = repo.changelog.parentrevs
1811 1806 def filter(r):
1812 1807 for p in parents(r):
1813 1808 if 0 <= p and p in s:
1814 1809 return False
1815 1810 return True
1816 1811 return subset & s.filter(filter, condrepr='<roots>')
1817 1812
1818 1813 _sortkeyfuncs = {
1819 1814 'rev': lambda c: c.rev(),
1820 1815 'branch': lambda c: c.branch(),
1821 1816 'desc': lambda c: c.description(),
1822 1817 'user': lambda c: c.user(),
1823 1818 'author': lambda c: c.user(),
1824 1819 'date': lambda c: c.date()[0],
1825 1820 }
1826 1821
1827 1822 def _getsortargs(x):
1828 1823 """Parse sort options into (set, [(key, reverse)], opts)"""
1829 1824 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1830 1825 if 'set' not in args:
1831 1826 # i18n: "sort" is a keyword
1832 1827 raise error.ParseError(_('sort requires one or two arguments'))
1833 1828 keys = "rev"
1834 1829 if 'keys' in args:
1835 1830 # i18n: "sort" is a keyword
1836 1831 keys = getstring(args['keys'], _("sort spec must be a string"))
1837 1832
1838 1833 keyflags = []
1839 1834 for k in keys.split():
1840 1835 fk = k
1841 1836 reverse = (k[0] == '-')
1842 1837 if reverse:
1843 1838 k = k[1:]
1844 1839 if k not in _sortkeyfuncs and k != 'topo':
1845 1840 raise error.ParseError(_("unknown sort key %r") % fk)
1846 1841 keyflags.append((k, reverse))
1847 1842
1848 1843 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1849 1844 # i18n: "topo" is a keyword
1850 1845 raise error.ParseError(_('topo sort order cannot be combined '
1851 1846 'with other sort keys'))
1852 1847
1853 1848 opts = {}
1854 1849 if 'topo.firstbranch' in args:
1855 1850 if any(k == 'topo' for k, reverse in keyflags):
1856 1851 opts['topo.firstbranch'] = args['topo.firstbranch']
1857 1852 else:
1858 1853 # i18n: "topo" and "topo.firstbranch" are keywords
1859 1854 raise error.ParseError(_('topo.firstbranch can only be used '
1860 1855 'when using the topo sort key'))
1861 1856
1862 1857 return args['set'], keyflags, opts
1863 1858
1864 1859 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
1865 1860 weight=10)
1866 1861 def sort(repo, subset, x, order):
1867 1862 """Sort set by keys. The default sort order is ascending, specify a key
1868 1863 as ``-key`` to sort in descending order.
1869 1864
1870 1865 The keys can be:
1871 1866
1872 1867 - ``rev`` for the revision number,
1873 1868 - ``branch`` for the branch name,
1874 1869 - ``desc`` for the commit message (description),
1875 1870 - ``user`` for user name (``author`` can be used as an alias),
1876 1871 - ``date`` for the commit date
1877 1872 - ``topo`` for a reverse topological sort
1878 1873
1879 1874 The ``topo`` sort order cannot be combined with other sort keys. This sort
1880 1875 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1881 1876 specifies which topological branches to prioritize in the sort.
1882 1877
1883 1878 """
1884 1879 s, keyflags, opts = _getsortargs(x)
1885 1880 revs = getset(repo, subset, s, order)
1886 1881
1887 1882 if not keyflags or order != defineorder:
1888 1883 return revs
1889 1884 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1890 1885 revs.sort(reverse=keyflags[0][1])
1891 1886 return revs
1892 1887 elif keyflags[0][0] == "topo":
1893 1888 firstbranch = ()
1894 1889 if 'topo.firstbranch' in opts:
1895 1890 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1896 1891 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1897 1892 firstbranch),
1898 1893 istopo=True)
1899 1894 if keyflags[0][1]:
1900 1895 revs.reverse()
1901 1896 return revs
1902 1897
1903 1898 # sort() is guaranteed to be stable
1904 1899 ctxs = [repo[r] for r in revs]
1905 1900 for k, reverse in reversed(keyflags):
1906 1901 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1907 1902 return baseset([c.rev() for c in ctxs])
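# Illustrative usage (a sketch, not part of this module; 'default' is a
# made-up branch name), assuming a loaded ``repo``:
#   repo.revs("sort(all(), '-date user')")                         # newest first, then by user
#   repo.revs("sort(all(), topo, topo.firstbranch=branch(default))")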
1908 1903
1909 1904 @predicate('subrepo([pattern])')
1910 1905 def subrepo(repo, subset, x):
1911 1906 """Changesets that add, modify or remove the given subrepo. If no subrepo
1912 1907 pattern is named, any subrepo changes are returned.
1913 1908 """
1914 1909 # i18n: "subrepo" is a keyword
1915 1910 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1916 1911 pat = None
1917 1912 if len(args) != 0:
1918 1913 pat = getstring(args[0], _("subrepo requires a pattern"))
1919 1914
1920 1915 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1921 1916
1922 1917 def submatches(names):
1923 1918 k, p, m = util.stringmatcher(pat)
1924 1919 for name in names:
1925 1920 if m(name):
1926 1921 yield name
1927 1922
1928 1923 def matches(x):
1929 1924 c = repo[x]
1930 1925 s = repo.status(c.p1().node(), c.node(), match=m)
1931 1926
1932 1927 if pat is None:
1933 1928 return s.added or s.modified or s.removed
1934 1929
1935 1930 if s.added:
1936 1931 return any(submatches(c.substate.keys()))
1937 1932
1938 1933 if s.modified:
1939 1934 subs = set(c.p1().substate.keys())
1940 1935 subs.update(c.substate.keys())
1941 1936
1942 1937 for path in submatches(subs):
1943 1938 if c.p1().substate.get(path) != c.substate.get(path):
1944 1939 return True
1945 1940
1946 1941 if s.removed:
1947 1942 return any(submatches(c.p1().substate.keys()))
1948 1943
1949 1944 return False
1950 1945
1951 1946 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1952 1947
1953 1948 def _mapbynodefunc(repo, s, f):
1954 1949 """(repo, smartset, [node] -> [node]) -> smartset
1955 1950
1956 1951 Helper method to map a smartset to another smartset given a function only
1957 1952 talking about nodes. Handles converting between rev numbers and nodes, and
1958 1953 filtering.
1959 1954 """
1960 1955 cl = repo.unfiltered().changelog
1961 1956 torev = cl.rev
1962 1957 tonode = cl.node
1963 1958 nodemap = cl.nodemap
1964 1959 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
1965 1960 return smartset.baseset(result - repo.changelog.filteredrevs)
1966 1961
1967 1962 @predicate('successors(set)', safe=True)
1968 1963 def successors(repo, subset, x):
1969 1964 """All successors for set, including the given set themselves"""
1970 1965 s = getset(repo, fullreposet(repo), x)
1971 1966 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
1972 1967 d = _mapbynodefunc(repo, s, f)
1973 1968 return subset & d
1974 1969
1975 1970 def _substringmatcher(pattern, casesensitive=True):
1976 1971 kind, pattern, matcher = util.stringmatcher(pattern,
1977 1972 casesensitive=casesensitive)
1978 1973 if kind == 'literal':
1979 1974 if not casesensitive:
1980 1975 pattern = encoding.lower(pattern)
1981 1976 matcher = lambda s: pattern in encoding.lower(s)
1982 1977 else:
1983 1978 matcher = lambda s: pattern in s
1984 1979 return kind, pattern, matcher
1985 1980
1986 1981 @predicate('tag([name])', safe=True)
1987 1982 def tag(repo, subset, x):
1988 1983 """The specified tag by name, or all tagged revisions if no name is given.
1989 1984
1990 1985 Pattern matching is supported for `name`. See
1991 1986 :hg:`help revisions.patterns`.
1992 1987 """
1993 1988 # i18n: "tag" is a keyword
1994 1989 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1995 1990 cl = repo.changelog
1996 1991 if args:
1997 1992 pattern = getstring(args[0],
1998 1993 # i18n: "tag" is a keyword
1999 1994 _('the argument to tag must be a string'))
2000 1995 kind, pattern, matcher = util.stringmatcher(pattern)
2001 1996 if kind == 'literal':
2002 1997 # avoid resolving all tags
2003 1998 tn = repo._tagscache.tags.get(pattern, None)
2004 1999 if tn is None:
2005 2000 raise error.RepoLookupError(_("tag '%s' does not exist")
2006 2001 % pattern)
2007 2002 s = {repo[tn].rev()}
2008 2003 else:
2009 2004 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2010 2005 else:
2011 2006 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
2012 2007 return subset & s
2013 2008
2014 2009 @predicate('tagged', safe=True)
2015 2010 def tagged(repo, subset, x):
2016 2011 return tag(repo, subset, x)
2017 2012
2018 2013 @predicate('unstable()', safe=True)
2019 2014 def unstable(repo, subset, x):
2020 2015 msg = ("'unstable()' is deprecated, "
2021 2016 "use 'orphan()'")
2022 2017 repo.ui.deprecwarn(msg, '4.4')
2023 2018
2024 2019 return orphan(repo, subset, x)
2025 2020
2026 2021 @predicate('orphan()', safe=True)
2027 2022 def orphan(repo, subset, x):
2028 2023 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2029 2024 """
2030 2025 # i18n: "orphan" is a keyword
2031 2026 getargs(x, 0, 0, _("orphan takes no arguments"))
2032 2027 orphan = obsmod.getrevs(repo, 'orphan')
2033 2028 return subset & orphan
2034 2029
2035 2030
2036 2031 @predicate('user(string)', safe=True, weight=10)
2037 2032 def user(repo, subset, x):
2038 2033 """User name contains string. The match is case-insensitive.
2039 2034
2040 2035 Pattern matching is supported for `string`. See
2041 2036 :hg:`help revisions.patterns`.
2042 2037 """
2043 2038 return author(repo, subset, x)
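# Illustrative usage (a sketch, not part of this module; 'alice' and 'bob'
# are made-up user names), assuming a loaded ``repo``:
#   repo.revs("user('alice')")            # substring, case-insensitive
#   repo.revs("user('re:alice|bob')")     # explicit regex pattern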
2044 2039
2045 2040 @predicate('wdir()', safe=True, weight=0)
2046 2041 def wdir(repo, subset, x):
2047 2042 """Working directory. (EXPERIMENTAL)"""
2048 2043 # i18n: "wdir" is a keyword
2049 2044 getargs(x, 0, 0, _("wdir takes no arguments"))
2050 2045 if node.wdirrev in subset or isinstance(subset, fullreposet):
2051 2046 return baseset([node.wdirrev])
2052 2047 return baseset()
2053 2048
2054 2049 def _orderedlist(repo, subset, x):
2055 2050 s = getstring(x, "internal error")
2056 2051 if not s:
2057 2052 return baseset()
2058 2053 # remove duplicates here. it's difficult for caller to deduplicate sets
2059 2054 # because different symbols can point to the same rev.
2060 2055 cl = repo.changelog
2061 2056 ls = []
2062 2057 seen = set()
2063 2058 for t in s.split('\0'):
2064 2059 try:
2065 2060 # fast path for integer revision
2066 2061 r = int(t)
2067 2062 if str(r) != t or r not in cl:
2068 2063 raise ValueError
2069 2064 revs = [r]
2070 2065 except ValueError:
2071 2066 revs = stringset(repo, subset, t, defineorder)
2072 2067
2073 2068 for r in revs:
2074 2069 if r in seen:
2075 2070 continue
2076 2071 if (r in subset
2077 2072 or r == node.nullrev and isinstance(subset, fullreposet)):
2078 2073 ls.append(r)
2079 2074 seen.add(r)
2080 2075 return baseset(ls)
2081 2076
2082 2077 # for internal use
2083 2078 @predicate('_list', safe=True, takeorder=True)
2084 2079 def _list(repo, subset, x, order):
2085 2080 if order == followorder:
2086 2081 # slow path to take the subset order
2087 2082 return subset & _orderedlist(repo, fullreposet(repo), x)
2088 2083 else:
2089 2084 return _orderedlist(repo, subset, x)
2090 2085
2091 2086 def _orderedintlist(repo, subset, x):
2092 2087 s = getstring(x, "internal error")
2093 2088 if not s:
2094 2089 return baseset()
2095 2090 ls = [int(r) for r in s.split('\0')]
2096 2091 s = subset
2097 2092 return baseset([r for r in ls if r in s])
2098 2093
2099 2094 # for internal use
2100 2095 @predicate('_intlist', safe=True, takeorder=True, weight=0)
2101 2096 def _intlist(repo, subset, x, order):
2102 2097 if order == followorder:
2103 2098 # slow path to take the subset order
2104 2099 return subset & _orderedintlist(repo, fullreposet(repo), x)
2105 2100 else:
2106 2101 return _orderedintlist(repo, subset, x)
2107 2102
2108 2103 def _orderedhexlist(repo, subset, x):
2109 2104 s = getstring(x, "internal error")
2110 2105 if not s:
2111 2106 return baseset()
2112 2107 cl = repo.changelog
2113 2108 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2114 2109 s = subset
2115 2110 return baseset([r for r in ls if r in s])
2116 2111
2117 2112 # for internal use
2118 2113 @predicate('_hexlist', safe=True, takeorder=True)
2119 2114 def _hexlist(repo, subset, x, order):
2120 2115 if order == followorder:
2121 2116 # slow path to take the subset order
2122 2117 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2123 2118 else:
2124 2119 return _orderedhexlist(repo, subset, x)
2125 2120
2126 2121 methods = {
2127 2122 "range": rangeset,
2128 2123 "rangeall": rangeall,
2129 2124 "rangepre": rangepre,
2130 2125 "rangepost": rangepost,
2131 2126 "dagrange": dagrange,
2132 2127 "string": stringset,
2133 2128 "symbol": stringset,
2134 2129 "and": andset,
2135 2130 "andsmally": andsmallyset,
2136 2131 "or": orset,
2137 2132 "not": notset,
2138 2133 "difference": differenceset,
2139 2134 "relation": relationset,
2140 2135 "relsubscript": relsubscriptset,
2141 2136 "subscript": subscriptset,
2142 2137 "list": listset,
2143 2138 "keyvalue": keyvaluepair,
2144 2139 "func": func,
2145 2140 "ancestor": ancestorspec,
2146 2141 "parent": parentspec,
2147 2142 "parentpost": parentpost,
2148 2143 }
2149 2144
2150 2145 def posttreebuilthook(tree, repo):
2151 2146 # hook for extensions to execute code on the optimized tree
2152 2147 pass
2153 2148
2154 2149 def match(ui, spec, repo=None):
2155 2150 """Create a matcher for a single revision spec"""
2156 2151 return matchany(ui, [spec], repo=repo)
2157 2152
2158 2153 def matchany(ui, specs, repo=None, localalias=None):
2159 2154 """Create a matcher that will include any revisions matching one of the
2160 2155 given specs
2161 2156
2162 2157 If localalias is not None, it is a dict {name: definitionstring}. It takes
2163 2158 precedence over [revsetalias] config section.
2164 2159 """
2165 2160 if not specs:
2166 2161 def mfunc(repo, subset=None):
2167 2162 return baseset()
2168 2163 return mfunc
2169 2164 if not all(specs):
2170 2165 raise error.ParseError(_("empty query"))
2171 2166 lookup = None
2172 2167 if repo:
2173 2168 lookup = repo.__contains__
2174 2169 if len(specs) == 1:
2175 2170 tree = revsetlang.parse(specs[0], lookup)
2176 2171 else:
2177 2172 tree = ('or',
2178 2173 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2179 2174
2180 2175 aliases = []
2181 2176 warn = None
2182 2177 if ui:
2183 2178 aliases.extend(ui.configitems('revsetalias'))
2184 2179 warn = ui.warn
2185 2180 if localalias:
2186 2181 aliases.extend(localalias.items())
2187 2182 if aliases:
2188 2183 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2189 2184 tree = revsetlang.foldconcat(tree)
2190 2185 tree = revsetlang.analyze(tree)
2191 2186 tree = revsetlang.optimize(tree)
2192 2187 posttreebuilthook(tree, repo)
2193 2188 return makematcher(tree)
2194 2189
2195 2190 def makematcher(tree):
2196 2191 """Create a matcher from an evaluatable tree"""
2197 2192 def mfunc(repo, subset=None, order=None):
2198 2193 if order is None:
2199 2194 if subset is None:
2200 2195 order = defineorder # 'x'
2201 2196 else:
2202 2197 order = followorder # 'subset & x'
2203 2198 if subset is None:
2204 2199 subset = fullreposet(repo)
2205 2200 return getset(repo, subset, tree, order)
2206 2201 return mfunc
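# Illustrative usage (a sketch, not part of this module; ``ui``, ``repo`` and
# ``some`` are assumed to be a ui object, a localrepository and a smartset):
#   m = matchany(ui, ["draft()", "head()"], repo=repo)
#   revs = m(repo)                 # subset is None: full repo, defineorder
#   revs = m(repo, subset=some)    # restricted to 'some', followorder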
2207 2202
2208 2203 def loadpredicate(ui, extname, registrarobj):
2209 2204 """Load revset predicates from specified registrarobj
2210 2205 """
2211 2206 for name, func in registrarobj._table.iteritems():
2212 2207 symbols[name] = func
2213 2208 if func._safe:
2214 2209 safesymbols.add(name)
2215 2210
2216 2211 # load built-in predicates explicitly to setup safesymbols
2217 2212 loadpredicate(None, None, predicate)
2218 2213
2219 2214 # tell hggettext to extract docstrings from these functions:
2220 2215 i18nfunctions = symbols.values()