##// END OF EJS Templates
revset: include all non-public phases in _notpublic...
marmoute -
r51201:92f71d40 default
parent child Browse files
Show More
@@ -1,979 +1,980 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set, we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * publish and old server changesets are seen as public by client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old clients behave as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103
104 104 import struct
105 105
106 106 from .i18n import _
107 107 from .node import (
108 108 bin,
109 109 hex,
110 110 nullrev,
111 111 short,
112 112 wdirrev,
113 113 )
114 114 from .pycompat import (
115 115 getattr,
116 116 setattr,
117 117 )
118 118 from . import (
119 119 error,
120 120 pycompat,
121 121 requirements,
122 122 smartset,
123 123 txnutil,
124 124 util,
125 125 )
126 126
127 127 if pycompat.TYPE_CHECKING:
128 128 from typing import (
129 129 Any,
130 130 Callable,
131 131 Dict,
132 132 Iterable,
133 133 List,
134 134 Optional,
135 135 Set,
136 136 Tuple,
137 137 )
138 138 from . import (
139 139 localrepo,
140 140 ui as uimod,
141 141 )
142 142
143 143 Phaseroots = Dict[int, Set[bytes]]
144 144 Phasedefaults = List[
145 145 Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
146 146 ]
147 147
148 148
149 149 _fphasesentry = struct.Struct(b'>i20s')
150 150
151 151 # record phase index
152 152 public, draft, secret = range(3) # type: int
153 153 archived = 32 # non-continuous for compatibility
154 154 internal = 96 # non-continuous for compatibility
155 155 allphases = (public, draft, secret, archived, internal)
156 156 trackedphases = (draft, secret, archived, internal)
157 not_public_phases = trackedphases
157 158 # record phase names
158 159 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
159 160 phasenames = dict(enumerate(cmdphasenames))
160 161 phasenames[archived] = b'archived'
161 162 phasenames[internal] = b'internal'
162 163 # map phase name to phase number
163 164 phasenumber = {name: phase for phase, name in phasenames.items()}
164 165 # like phasenumber, but also include maps for the numeric and binary
165 166 # phase number to the phase number
166 167 phasenumber2 = phasenumber.copy()
167 168 phasenumber2.update({phase: phase for phase in phasenames})
168 169 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
169 170 # record phase property
170 171 mutablephases = (draft, secret, archived, internal)
171 172 remotehiddenphases = (secret, archived, internal)
172 173 localhiddenphases = (internal, archived)
173 174
174 175
175 176 def supportinternal(repo):
176 177 # type: (localrepo.localrepository) -> bool
177 178 """True if the internal phase can be used on a repository"""
178 179 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
179 180
180 181
181 182 def supportarchived(repo):
182 183 # type: (localrepo.localrepository) -> bool
183 184 """True if the archived phase can be used on a repository"""
184 185 return requirements.ARCHIVED_PHASE_REQUIREMENT in repo.requirements
185 186
186 187
187 188 def _readroots(repo, phasedefaults=None):
188 189 # type: (localrepo.localrepository, Optional[Phasedefaults]) -> Tuple[Phaseroots, bool]
189 190 """Read phase roots from disk
190 191
191 192 phasedefaults is a list of fn(repo, roots) callable, which are
192 193 executed if the phase roots file does not exist. When phases are
193 194 being initialized on an existing repository, this could be used to
194 195 set selected changesets phase to something else than public.
195 196
196 197 Return (roots, dirty) where dirty is true if roots differ from
197 198 what is being stored.
198 199 """
199 200 repo = repo.unfiltered()
200 201 dirty = False
201 202 roots = {i: set() for i in allphases}
202 203 try:
203 204 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
204 205 try:
205 206 for line in f:
206 207 phase, nh = line.split()
207 208 roots[int(phase)].add(bin(nh))
208 209 finally:
209 210 f.close()
210 211 except FileNotFoundError:
211 212 if phasedefaults:
212 213 for f in phasedefaults:
213 214 roots = f(repo, roots)
214 215 dirty = True
215 216 return roots, dirty
216 217
217 218
218 219 def binaryencode(phasemapping):
219 220 # type: (Dict[int, List[bytes]]) -> bytes
220 221 """encode a 'phase -> nodes' mapping into a binary stream
221 222
222 223 The revision lists are encoded as (phase, root) pairs.
223 224 """
224 225 binarydata = []
225 226 for phase, nodes in phasemapping.items():
226 227 for head in nodes:
227 228 binarydata.append(_fphasesentry.pack(phase, head))
228 229 return b''.join(binarydata)
229 230
230 231
231 232 def binarydecode(stream):
232 233 # type: (...) -> Dict[int, List[bytes]]
233 234 """decode a binary stream into a 'phase -> nodes' mapping
234 235
235 236 The (phase, root) pairs are turned back into a dictionary with
236 237 the phase as index and the aggregated roots of that phase as value."""
237 238 headsbyphase = {i: [] for i in allphases}
238 239 entrysize = _fphasesentry.size
239 240 while True:
240 241 entry = stream.read(entrysize)
241 242 if len(entry) < entrysize:
242 243 if entry:
243 244 raise error.Abort(_(b'bad phase-heads stream'))
244 245 break
245 246 phase, node = _fphasesentry.unpack(entry)
246 247 headsbyphase[phase].append(node)
247 248 return headsbyphase
248 249
249 250
250 251 def _sortedrange_insert(data, idx, rev, t):
251 252 merge_before = False
252 253 if idx:
253 254 r1, t1 = data[idx - 1]
254 255 merge_before = r1[-1] + 1 == rev and t1 == t
255 256 merge_after = False
256 257 if idx < len(data):
257 258 r2, t2 = data[idx]
258 259 merge_after = r2[0] == rev + 1 and t2 == t
259 260
260 261 if merge_before and merge_after:
261 262 data[idx - 1] = (range(r1[0], r2[-1] + 1), t)
262 263 data.pop(idx)
263 264 elif merge_before:
264 265 data[idx - 1] = (range(r1[0], rev + 1), t)
265 266 elif merge_after:
266 267 data[idx] = (range(rev, r2[-1] + 1), t)
267 268 else:
268 269 data.insert(idx, (range(rev, rev + 1), t))
269 270
270 271
271 272 def _sortedrange_split(data, idx, rev, t):
272 273 r1, t1 = data[idx]
273 274 if t == t1:
274 275 return
275 276 t = (t1[0], t[1])
276 277 if len(r1) == 1:
277 278 data.pop(idx)
278 279 _sortedrange_insert(data, idx, rev, t)
279 280 elif r1[0] == rev:
280 281 data[idx] = (range(rev + 1, r1[-1] + 1), t1)
281 282 _sortedrange_insert(data, idx, rev, t)
282 283 elif r1[-1] == rev:
283 284 data[idx] = (range(r1[0], rev), t1)
284 285 _sortedrange_insert(data, idx + 1, rev, t)
285 286 else:
286 287 data[idx : idx + 1] = [
287 288 (range(r1[0], rev), t1),
288 289 (range(rev, rev + 1), t),
289 290 (range(rev + 1, r1[-1] + 1), t1),
290 291 ]
291 292
292 293
293 294 def _trackphasechange(data, rev, old, new):
294 295 """add a phase move to the <data> list of ranges
295 296
296 297 If data is None, nothing happens.
297 298 """
298 299 if data is None:
299 300 return
300 301
301 302 # If data is empty, create a one-revision range and done
302 303 if not data:
303 304 data.insert(0, (range(rev, rev + 1), (old, new)))
304 305 return
305 306
306 307 low = 0
307 308 high = len(data)
308 309 t = (old, new)
309 310 while low < high:
310 311 mid = (low + high) // 2
311 312 revs = data[mid][0]
312 313 revs_low = revs[0]
313 314 revs_high = revs[-1]
314 315
315 316 if rev >= revs_low and rev <= revs_high:
316 317 _sortedrange_split(data, mid, rev, t)
317 318 return
318 319
319 320 if revs_low == rev + 1:
320 321 if mid and data[mid - 1][0][-1] == rev:
321 322 _sortedrange_split(data, mid - 1, rev, t)
322 323 else:
323 324 _sortedrange_insert(data, mid, rev, t)
324 325 return
325 326
326 327 if revs_high == rev - 1:
327 328 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
328 329 _sortedrange_split(data, mid + 1, rev, t)
329 330 else:
330 331 _sortedrange_insert(data, mid + 1, rev, t)
331 332 return
332 333
333 334 if revs_low > rev:
334 335 high = mid
335 336 else:
336 337 low = mid + 1
337 338
338 339 if low == len(data):
339 340 data.append((range(rev, rev + 1), t))
340 341 return
341 342
342 343 r1, t1 = data[low]
343 344 if r1[0] > rev:
344 345 data.insert(low, (range(rev, rev + 1), t))
345 346 else:
346 347 data.insert(low + 1, (range(rev, rev + 1), t))
347 348
348 349
349 350 class phasecache:
350 351 def __init__(self, repo, phasedefaults, _load=True):
351 352 # type: (localrepo.localrepository, Optional[Phasedefaults], bool) -> None
352 353 if _load:
353 354 # Cheap trick to allow shallow-copy without copy module
354 355 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
355 356 self._loadedrevslen = 0
356 357 self._phasesets = None
357 358 self.filterunknown(repo)
358 359 self.opener = repo.svfs
359 360
360 361 def hasnonpublicphases(self, repo):
361 362 # type: (localrepo.localrepository) -> bool
362 363 """detect if there are revisions with non-public phase"""
363 364 repo = repo.unfiltered()
364 365 cl = repo.changelog
365 366 if len(cl) >= self._loadedrevslen:
366 367 self.invalidate()
367 368 self.loadphaserevs(repo)
368 369 return any(
369 370 revs for phase, revs in self.phaseroots.items() if phase != public
370 371 )
371 372
372 373 def nonpublicphaseroots(self, repo):
373 374 # type: (localrepo.localrepository) -> Set[bytes]
374 375 """returns the roots of all non-public phases
375 376
376 377 The roots are not minimized, so if the secret revisions are
377 378 descendants of draft revisions, their roots will still be present.
378 379 """
379 380 repo = repo.unfiltered()
380 381 cl = repo.changelog
381 382 if len(cl) >= self._loadedrevslen:
382 383 self.invalidate()
383 384 self.loadphaserevs(repo)
384 385 return set().union(
385 386 *[
386 387 revs
387 388 for phase, revs in self.phaseroots.items()
388 389 if phase != public
389 390 ]
390 391 )
391 392
392 393 def getrevset(self, repo, phases, subset=None):
393 394 # type: (localrepo.localrepository, Iterable[int], Optional[Any]) -> Any
394 395 # TODO: finish typing this
395 396 """return a smartset for the given phases"""
396 397 self.loadphaserevs(repo) # ensure phase's sets are loaded
397 398 phases = set(phases)
398 399 publicphase = public in phases
399 400
400 401 if publicphase:
401 402 # In this case, phases keeps all the *other* phases.
402 403 phases = set(allphases).difference(phases)
403 404 if not phases:
404 405 return smartset.fullreposet(repo)
405 406
406 407 # fast path: _phasesets contains the interesting sets,
407 408 # might only need a union and post-filtering.
408 409 revsneedscopy = False
409 410 if len(phases) == 1:
410 411 [p] = phases
411 412 revs = self._phasesets[p]
412 413 revsneedscopy = True # Don't modify _phasesets
413 414 else:
414 415 # revs has the revisions in all *other* phases.
415 416 revs = set.union(*[self._phasesets[p] for p in phases])
416 417
417 418 def _addwdir(wdirsubset, wdirrevs):
418 419 if wdirrev in wdirsubset and repo[None].phase() in phases:
419 420 if revsneedscopy:
420 421 wdirrevs = wdirrevs.copy()
421 422 # The working dir would never be in the # cache, but it was in
422 423 # the subset being filtered for its phase (or filtered out,
423 424 # depending on publicphase), so add it to the output to be
424 425 # included (or filtered out).
425 426 wdirrevs.add(wdirrev)
426 427 return wdirrevs
427 428
428 429 if not publicphase:
429 430 if repo.changelog.filteredrevs:
430 431 revs = revs - repo.changelog.filteredrevs
431 432
432 433 if subset is None:
433 434 return smartset.baseset(revs)
434 435 else:
435 436 revs = _addwdir(subset, revs)
436 437 return subset & smartset.baseset(revs)
437 438 else:
438 439 if subset is None:
439 440 subset = smartset.fullreposet(repo)
440 441
441 442 revs = _addwdir(subset, revs)
442 443
443 444 if not revs:
444 445 return subset
445 446 return subset.filter(lambda r: r not in revs)
446 447
447 448 def copy(self):
448 449 # Shallow copy meant to ensure isolation in
449 450 # advance/retractboundary(), nothing more.
450 451 ph = self.__class__(None, None, _load=False)
451 452 ph.phaseroots = self.phaseroots.copy()
452 453 ph.dirty = self.dirty
453 454 ph.opener = self.opener
454 455 ph._loadedrevslen = self._loadedrevslen
455 456 ph._phasesets = self._phasesets
456 457 return ph
457 458
458 459 def replace(self, phcache):
459 460 """replace all values in 'self' with content of phcache"""
460 461 for a in (
461 462 b'phaseroots',
462 463 b'dirty',
463 464 b'opener',
464 465 b'_loadedrevslen',
465 466 b'_phasesets',
466 467 ):
467 468 setattr(self, a, getattr(phcache, a))
468 469
469 470 def _getphaserevsnative(self, repo):
470 471 repo = repo.unfiltered()
471 472 return repo.changelog.computephases(self.phaseroots)
472 473
473 474 def _computephaserevspure(self, repo):
474 475 repo = repo.unfiltered()
475 476 cl = repo.changelog
476 477 self._phasesets = {phase: set() for phase in allphases}
477 478 lowerroots = set()
478 479 for phase in reversed(trackedphases):
479 480 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
480 481 if roots:
481 482 ps = set(cl.descendants(roots))
482 483 for root in roots:
483 484 ps.add(root)
484 485 ps.difference_update(lowerroots)
485 486 lowerroots.update(ps)
486 487 self._phasesets[phase] = ps
487 488 self._loadedrevslen = len(cl)
488 489
489 490 def loadphaserevs(self, repo):
490 491 # type: (localrepo.localrepository) -> None
491 492 """ensure phase information is loaded in the object"""
492 493 if self._phasesets is None:
493 494 try:
494 495 res = self._getphaserevsnative(repo)
495 496 self._loadedrevslen, self._phasesets = res
496 497 except AttributeError:
497 498 self._computephaserevspure(repo)
498 499
499 500 def invalidate(self):
500 501 self._loadedrevslen = 0
501 502 self._phasesets = None
502 503
503 504 def phase(self, repo, rev):
504 505 # type: (localrepo.localrepository, int) -> int
505 506 # We need a repo argument here to be able to build _phasesets
506 507 # if necessary. The repository instance is not stored in
507 508 # phasecache to avoid reference cycles. The changelog instance
508 509 # is not stored because it is a filecache() property and can
509 510 # be replaced without us being notified.
510 511 if rev == nullrev:
511 512 return public
512 513 if rev < nullrev:
513 514 raise ValueError(_(b'cannot lookup negative revision'))
514 515 if rev >= self._loadedrevslen:
515 516 self.invalidate()
516 517 self.loadphaserevs(repo)
517 518 for phase in trackedphases:
518 519 if rev in self._phasesets[phase]:
519 520 return phase
520 521 return public
521 522
522 523 def write(self):
523 524 if not self.dirty:
524 525 return
525 526 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
526 527 try:
527 528 self._write(f)
528 529 finally:
529 530 f.close()
530 531
531 532 def _write(self, fp):
532 533 for phase, roots in self.phaseroots.items():
533 534 for h in sorted(roots):
534 535 fp.write(b'%i %s\n' % (phase, hex(h)))
535 536 self.dirty = False
536 537
537 538 def _updateroots(self, phase, newroots, tr):
538 539 self.phaseroots[phase] = newroots
539 540 self.invalidate()
540 541 self.dirty = True
541 542
542 543 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
543 544 tr.hookargs[b'phases_moved'] = b'1'
544 545
545 546 def registernew(self, repo, tr, targetphase, revs):
546 547 repo = repo.unfiltered()
547 548 self._retractboundary(repo, tr, targetphase, [], revs=revs)
548 549 if tr is not None and b'phases' in tr.changes:
549 550 phasetracking = tr.changes[b'phases']
550 551 phase = self.phase
551 552 for rev in sorted(revs):
552 553 revphase = phase(repo, rev)
553 554 _trackphasechange(phasetracking, rev, None, revphase)
554 555 repo.invalidatevolatilesets()
555 556
556 557 def advanceboundary(
557 558 self, repo, tr, targetphase, nodes, revs=None, dryrun=None
558 559 ):
559 560 """Set all 'nodes' to phase 'targetphase'
560 561
561 562 Nodes with a phase lower than 'targetphase' are not affected.
562 563
563 564 If dryrun is True, no actions will be performed
564 565
565 566 Returns a set of revs whose phase is changed or should be changed
566 567 """
567 568 # Be careful to preserve shallow-copied values: do not update
568 569 # phaseroots values, replace them.
569 570 if revs is None:
570 571 revs = []
571 572 if tr is None:
572 573 phasetracking = None
573 574 else:
574 575 phasetracking = tr.changes.get(b'phases')
575 576
576 577 repo = repo.unfiltered()
577 578 revs = [repo[n].rev() for n in nodes] + [r for r in revs]
578 579
579 580 changes = set() # set of revisions to be changed
580 581 delroots = [] # set of root deleted by this path
581 582 for phase in (phase for phase in allphases if phase > targetphase):
582 583 # filter nodes that are not in a compatible phase already
583 584 revs = [rev for rev in revs if self.phase(repo, rev) >= phase]
584 585 if not revs:
585 586 break # no roots to move anymore
586 587
587 588 olds = self.phaseroots[phase]
588 589
589 590 affected = repo.revs(b'%ln::%ld', olds, revs)
590 591 changes.update(affected)
591 592 if dryrun:
592 593 continue
593 594 for r in affected:
594 595 _trackphasechange(
595 596 phasetracking, r, self.phase(repo, r), targetphase
596 597 )
597 598
598 599 roots = {
599 600 ctx.node()
600 601 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
601 602 }
602 603 if olds != roots:
603 604 self._updateroots(phase, roots, tr)
604 605 # some roots may need to be declared for lower phases
605 606 delroots.extend(olds - roots)
606 607 if not dryrun:
607 608 # declare deleted root in the target phase
608 609 if targetphase != 0:
609 610 self._retractboundary(repo, tr, targetphase, delroots)
610 611 repo.invalidatevolatilesets()
611 612 return changes
612 613
613 614 def retractboundary(self, repo, tr, targetphase, nodes):
614 615 oldroots = {
615 616 phase: revs
616 617 for phase, revs in self.phaseroots.items()
617 618 if phase <= targetphase
618 619 }
619 620 if tr is None:
620 621 phasetracking = None
621 622 else:
622 623 phasetracking = tr.changes.get(b'phases')
623 624 repo = repo.unfiltered()
624 625 if (
625 626 self._retractboundary(repo, tr, targetphase, nodes)
626 627 and phasetracking is not None
627 628 ):
628 629
629 630 # find the affected revisions
630 631 new = self.phaseroots[targetphase]
631 632 old = oldroots[targetphase]
632 633 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
633 634
634 635 # find the phase of the affected revision
635 636 for phase in range(targetphase, -1, -1):
636 637 if phase:
637 638 roots = oldroots.get(phase, [])
638 639 revs = set(repo.revs(b'%ln::%ld', roots, affected))
639 640 affected -= revs
640 641 else: # public phase
641 642 revs = affected
642 643 for r in sorted(revs):
643 644 _trackphasechange(phasetracking, r, phase, targetphase)
644 645 repo.invalidatevolatilesets()
645 646
646 647 def _retractboundary(self, repo, tr, targetphase, nodes, revs=None):
647 648 # Be careful to preserve shallow-copied values: do not update
648 649 # phaseroots values, replace them.
649 650 if revs is None:
650 651 revs = []
651 652 if (
652 653 targetphase == internal
653 654 and not supportinternal(repo)
654 655 or targetphase == archived
655 656 and not supportarchived(repo)
656 657 ):
657 658 name = phasenames[targetphase]
658 659 msg = b'this repository does not support the %s phase' % name
659 660 raise error.ProgrammingError(msg)
660 661
661 662 repo = repo.unfiltered()
662 663 torev = repo.changelog.rev
663 664 tonode = repo.changelog.node
664 665 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
665 666 finalroots = oldroots = set(currentroots)
666 667 newroots = [torev(node) for node in nodes] + [r for r in revs]
667 668 newroots = [
668 669 rev for rev in newroots if self.phase(repo, rev) < targetphase
669 670 ]
670 671
671 672 if newroots:
672 673 if nullrev in newroots:
673 674 raise error.Abort(_(b'cannot change null revision phase'))
674 675 currentroots.update(newroots)
675 676
676 677 # Only compute new roots for revs above the roots that are being
677 678 # retracted.
678 679 minnewroot = min(newroots)
679 680 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
680 681 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
681 682
682 683 finalroots = {rev for rev in currentroots if rev < minnewroot}
683 684 finalroots.update(updatedroots)
684 685 if finalroots != oldroots:
685 686 self._updateroots(
686 687 targetphase, {tonode(rev) for rev in finalroots}, tr
687 688 )
688 689 return True
689 690 return False
690 691
691 692 def filterunknown(self, repo):
692 693 # type: (localrepo.localrepository) -> None
693 694 """remove unknown nodes from the phase boundary
694 695
695 696 Nothing is lost as unknown nodes only hold data for their descendants.
696 697 """
697 698 filtered = False
698 699 has_node = repo.changelog.index.has_node # to filter unknown nodes
699 700 for phase, nodes in self.phaseroots.items():
700 701 missing = sorted(node for node in nodes if not has_node(node))
701 702 if missing:
702 703 for mnode in missing:
703 704 repo.ui.debug(
704 705 b'removing unknown node %s from %i-phase boundary\n'
705 706 % (short(mnode), phase)
706 707 )
707 708 nodes.symmetric_difference_update(missing)
708 709 filtered = True
709 710 if filtered:
710 711 self.dirty = True
711 712 # filterunknown is called by repo.destroyed, we may have no changes in
712 713 # root but _phasesets contents is certainly invalid (or at least we
713 714 # have not proper way to check that). related to issue 3858.
714 715 #
715 716 # The other caller is __init__ that have no _phasesets initialized
716 717 # anyway. If this change we should consider adding a dedicated
717 718 # "destroyed" function to phasecache or a proper cache key mechanism
718 719 # (see branchmap one)
719 720 self.invalidate()
720 721
721 722
722 723 def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None):
723 724 """Add nodes to a phase changing other nodes phases if necessary.
724 725
725 726 This function move boundary *forward* this means that all nodes
726 727 are set in the target phase or kept in a *lower* phase.
727 728
728 729 Simplify boundary to contains phase roots only.
729 730
730 731 If dryrun is True, no actions will be performed
731 732
732 733 Returns a set of revs whose phase is changed or should be changed
733 734 """
734 735 if revs is None:
735 736 revs = []
736 737 phcache = repo._phasecache.copy()
737 738 changes = phcache.advanceboundary(
738 739 repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun
739 740 )
740 741 if not dryrun:
741 742 repo._phasecache.replace(phcache)
742 743 return changes
743 744
744 745
745 746 def retractboundary(repo, tr, targetphase, nodes):
746 747 """Set nodes back to a phase changing other nodes phases if
747 748 necessary.
748 749
749 750 This function move boundary *backward* this means that all nodes
750 751 are set in the target phase or kept in a *higher* phase.
751 752
752 753 Simplify boundary to contains phase roots only."""
753 754 phcache = repo._phasecache.copy()
754 755 phcache.retractboundary(repo, tr, targetphase, nodes)
755 756 repo._phasecache.replace(phcache)
756 757
757 758
758 759 def registernew(repo, tr, targetphase, revs):
759 760 """register a new revision and its phase
760 761
761 762 Code adding revisions to the repository should use this function to
762 763 set new changeset in their target phase (or higher).
763 764 """
764 765 phcache = repo._phasecache.copy()
765 766 phcache.registernew(repo, tr, targetphase, revs)
766 767 repo._phasecache.replace(phcache)
767 768
768 769
769 770 def listphases(repo):
770 771 # type: (localrepo.localrepository) -> Dict[bytes, bytes]
771 772 """List phases root for serialization over pushkey"""
772 773 # Use ordered dictionary so behavior is deterministic.
773 774 keys = util.sortdict()
774 775 value = b'%i' % draft
775 776 cl = repo.unfiltered().changelog
776 777 for root in repo._phasecache.phaseroots[draft]:
777 778 if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
778 779 keys[hex(root)] = value
779 780
780 781 if repo.publishing():
781 782 # Add an extra data to let remote know we are a publishing
782 783 # repo. Publishing repo can't just pretend they are old repo.
783 784 # When pushing to a publishing repo, the client still need to
784 785 # push phase boundary
785 786 #
786 787 # Push do not only push changeset. It also push phase data.
787 788 # New phase data may apply to common changeset which won't be
788 789 # push (as they are common). Here is a very simple example:
789 790 #
790 791 # 1) repo A push changeset X as draft to repo B
791 792 # 2) repo B make changeset X public
792 793 # 3) repo B push to repo A. X is not pushed but the data that
793 794 # X as now public should
794 795 #
795 796 # The server can't handle it on it's own as it has no idea of
796 797 # client phase data.
797 798 keys[b'publishing'] = b'True'
798 799 return keys
799 800
800 801
801 802 def pushphase(repo, nhex, oldphasestr, newphasestr):
802 803 # type: (localrepo.localrepository, bytes, bytes, bytes) -> bool
803 804 """List phases root for serialization over pushkey"""
804 805 repo = repo.unfiltered()
805 806 with repo.lock():
806 807 currentphase = repo[nhex].phase()
807 808 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
808 809 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
809 810 if currentphase == oldphase and newphase < oldphase:
810 811 with repo.transaction(b'pushkey-phase') as tr:
811 812 advanceboundary(repo, tr, newphase, [bin(nhex)])
812 813 return True
813 814 elif currentphase == newphase:
814 815 # raced, but got correct result
815 816 return True
816 817 else:
817 818 return False
818 819
819 820
820 821 def subsetphaseheads(repo, subset):
821 822 """Finds the phase heads for a subset of a history
822 823
823 824 Returns a list indexed by phase number where each item is a list of phase
824 825 head nodes.
825 826 """
826 827 cl = repo.changelog
827 828
828 829 headsbyphase = {i: [] for i in allphases}
829 830 # No need to keep track of secret phase; any heads in the subset that
830 831 # are not mentioned are implicitly secret.
831 832 for phase in allphases[:secret]:
832 833 revset = b"heads(%%ln & %s())" % phasenames[phase]
833 834 headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
834 835 return headsbyphase
835 836
836 837
837 838 def updatephases(repo, trgetter, headsbyphase):
838 839 """Updates the repo with the given phase heads"""
839 840 # Now advance phase boundaries of all phases
840 841 #
841 842 # run the update (and fetch transaction) only if there are actually things
842 843 # to update. This avoid creating empty transaction during no-op operation.
843 844
844 845 for phase in allphases:
845 846 revset = b'%ln - _phase(%s)'
846 847 heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
847 848 if heads:
848 849 advanceboundary(repo, trgetter(), phase, heads)
849 850
850 851
851 852 def analyzeremotephases(repo, subset, roots):
852 853 """Compute phases heads and root in a subset of node from root dict
853 854
854 855 * subset is heads of the subset
855 856 * roots is {<nodeid> => phase} mapping. key and value are string.
856 857
857 858 Accept unknown element input
858 859 """
859 860 repo = repo.unfiltered()
860 861 # build list from dictionary
861 862 draftroots = []
862 863 has_node = repo.changelog.index.has_node # to filter unknown nodes
863 864 for nhex, phase in roots.items():
864 865 if nhex == b'publishing': # ignore data related to publish option
865 866 continue
866 867 node = bin(nhex)
867 868 phase = int(phase)
868 869 if phase == public:
869 870 if node != repo.nullid:
870 871 repo.ui.warn(
871 872 _(
872 873 b'ignoring inconsistent public root'
873 874 b' from remote: %s\n'
874 875 )
875 876 % nhex
876 877 )
877 878 elif phase == draft:
878 879 if has_node(node):
879 880 draftroots.append(node)
880 881 else:
881 882 repo.ui.warn(
882 883 _(b'ignoring unexpected root from remote: %i %s\n')
883 884 % (phase, nhex)
884 885 )
885 886 # compute heads
886 887 publicheads = newheads(repo, subset, draftroots)
887 888 return publicheads, draftroots
888 889
889 890
890 891 class remotephasessummary:
891 892 """summarize phase information on the remote side
892 893
893 894 :publishing: True is the remote is publishing
894 895 :publicheads: list of remote public phase heads (nodes)
895 896 :draftheads: list of remote draft phase heads (nodes)
896 897 :draftroots: list of remote draft phase root (nodes)
897 898 """
898 899
899 900 def __init__(self, repo, remotesubset, remoteroots):
900 901 unfi = repo.unfiltered()
901 902 self._allremoteroots = remoteroots
902 903
903 904 self.publishing = remoteroots.get(b'publishing', False)
904 905
905 906 ana = analyzeremotephases(repo, remotesubset, remoteroots)
906 907 self.publicheads, self.draftroots = ana
907 908 # Get the list of all "heads" revs draft on remote
908 909 dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
909 910 self.draftheads = [c.node() for c in dheads]
910 911
911 912
912 913 def newheads(repo, heads, roots):
913 914 """compute new head of a subset minus another
914 915
915 916 * `heads`: define the first subset
916 917 * `roots`: define the second we subtract from the first"""
917 918 # prevent an import cycle
918 919 # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
919 920 from . import dagop
920 921
921 922 repo = repo.unfiltered()
922 923 cl = repo.changelog
923 924 rev = cl.index.get_rev
924 925 if not roots:
925 926 return heads
926 927 if not heads or heads == [repo.nullid]:
927 928 return []
928 929 # The logic operated on revisions, convert arguments early for convenience
929 930 new_heads = {rev(n) for n in heads if n != repo.nullid}
930 931 roots = [rev(n) for n in roots]
931 932 # compute the area we need to remove
932 933 affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
933 934 # heads in the area are no longer heads
934 935 new_heads.difference_update(affected_zone)
935 936 # revisions in the area have children outside of it,
936 937 # They might be new heads
937 938 candidates = repo.revs(
938 939 b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
939 940 )
940 941 candidates -= affected_zone
941 942 if new_heads or candidates:
942 943 # remove candidate that are ancestors of other heads
943 944 new_heads.update(candidates)
944 945 prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
945 946 pruned = dagop.reachableroots(repo, candidates, prunestart)
946 947 new_heads.difference_update(pruned)
947 948
948 949 return pycompat.maplist(cl.node, sorted(new_heads))
949 950
950 951
951 952 def newcommitphase(ui):
952 953 # type: (uimod.ui) -> int
953 954 """helper to get the target phase of new commit
954 955
955 956 Handle all possible values for the phases.new-commit options.
956 957
957 958 """
958 959 v = ui.config(b'phases', b'new-commit')
959 960 try:
960 961 return phasenumber2[v]
961 962 except KeyError:
962 963 raise error.ConfigError(
963 964 _(b"phases.new-commit: not a valid phase name ('%s')") % v
964 965 )
965 966
966 967
967 968 def hassecret(repo):
968 969 # type: (localrepo.localrepository) -> bool
969 970 """utility function that check if a repo have any secret changeset."""
970 971 return bool(repo._phasecache.phaseroots[secret])
971 972
972 973
973 974 def preparehookargs(node, old, new):
974 975 # type: (bytes, Optional[int], Optional[int]) -> Dict[bytes, bytes]
975 976 if old is None:
976 977 old = b''
977 978 else:
978 979 old = phasenames[old]
979 980 return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
@@ -1,2869 +1,2869 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import binascii
10 10 import functools
11 11 import random
12 12 import re
13 13
14 14 from .i18n import _
15 15 from .pycompat import getattr
16 16 from .node import (
17 17 bin,
18 18 nullrev,
19 19 wdirrev,
20 20 )
21 21 from . import (
22 22 dagop,
23 23 destutil,
24 24 diffutil,
25 25 encoding,
26 26 error,
27 27 grep as grepmod,
28 28 hbisect,
29 29 match as matchmod,
30 30 obsolete as obsmod,
31 31 obsutil,
32 32 pathutil,
33 33 phases,
34 34 pycompat,
35 35 registrar,
36 36 repoview,
37 37 revsetlang,
38 38 scmutil,
39 39 smartset,
40 40 stack as stackmod,
41 41 util,
42 42 )
43 43 from .utils import (
44 44 dateutil,
45 45 stringutil,
46 46 urlutil,
47 47 )
48 48
49 49 # helpers for processing parsed tree
50 50 getsymbol = revsetlang.getsymbol
51 51 getstring = revsetlang.getstring
52 52 getinteger = revsetlang.getinteger
53 53 getboolean = revsetlang.getboolean
54 54 getlist = revsetlang.getlist
55 55 getintrange = revsetlang.getintrange
56 56 getargs = revsetlang.getargs
57 57 getargsdict = revsetlang.getargsdict
58 58
59 59 baseset = smartset.baseset
60 60 generatorset = smartset.generatorset
61 61 spanset = smartset.spanset
62 62 fullreposet = smartset.fullreposet
63 63
64 64 # revisions not included in all(), but populated if specified
65 65 _virtualrevs = (nullrev, wdirrev)
66 66
67 67 # Constants for ordering requirement, used in getset():
68 68 #
69 69 # If 'define', any nested functions and operations MAY change the ordering of
70 70 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
71 71 # it). If 'follow', any nested functions and operations MUST take the ordering
72 72 # specified by the first operand to the '&' operator.
73 73 #
74 74 # For instance,
75 75 #
76 76 # X & (Y | Z)
77 77 # ^ ^^^^^^^
78 78 # | follow
79 79 # define
80 80 #
81 81 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
82 82 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
83 83 #
84 84 # 'any' means the order doesn't matter. For instance,
85 85 #
86 86 # (X & !Y) | ancestors(Z)
87 87 # ^ ^
88 88 # any any
89 89 #
90 90 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
91 91 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
92 92 # since 'ancestors' does not care about the order of its argument.
93 93 #
94 94 # Currently, most revsets do not care about the order, so 'define' is
95 95 # equivalent to 'follow' for them, and the resulting order is based on the
96 96 # 'subset' parameter passed down to them:
97 97 #
98 98 # m = revset.match(...)
99 99 # m(repo, subset, order=defineorder)
100 100 # ^^^^^^
101 101 # For most revsets, 'define' means using the order this subset provides
102 102 #
103 103 # There are a few revsets that always redefine the order if 'define' is
104 104 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
105 105 anyorder = b'any' # don't care the order, could be even random-shuffled
106 106 defineorder = b'define' # ALWAYS redefine, or ALWAYS follow the current order
107 107 followorder = b'follow' # MUST follow the current order
108 108
109 109 # helpers
110 110
111 111
def getset(repo, subset, x, order=defineorder):
    """Evaluate parsed tree *x* against *subset* via the operator table.

    Raises ParseError when the tree node is missing.
    """
    if x:
        return methods[x[0]](repo, subset, *x[1:], order=order)
    raise error.ParseError(_(b"missing argument"))
116 116
117 117
def _getrevsource(repo, r):
    """Return the rev a changeset was grafted/transplanted/rebased from.

    Checks the known extra-data keys in order; returns None when no
    resolvable source is recorded.
    """
    extra = repo[r].extra()
    for label in (b'source', b'transplant_source', b'rebase_source'):
        node = extra.get(label)
        if node is None:
            continue
        try:
            return repo[node].rev()
        except error.RepoLookupError:
            # recorded source is not in this repo; try the next key
            continue
    return None
127 127
128 128
def _sortedb(xs):
    """Sort *xs* after normalizing elements for bytes-safe comparison."""
    normalized = pycompat.rapply(pycompat.maybebytestr, xs)
    return sorted(normalized)
131 131
132 132
133 133 # operator methods
134 134
135 135
def stringset(repo, subset, x, order):
    """Resolve a revision symbol string to a one-element (or empty) set."""
    if not x:
        raise error.ParseError(_(b"empty string is not a valid revision"))
    rev = scmutil.intrev(scmutil.revsymbol(repo, x))
    # virtual revisions (null, wdir) are only reachable through a full-repo
    # subset; concrete revisions use plain membership
    if rev in subset or (isinstance(subset, fullreposet) and rev in _virtualrevs):
        return baseset([rev])
    return baseset()
143 143
144 144
def rawsmartset(repo, subset, x, order):
    """The argument is already a smartset; intersect it with the subset.

    The left operand of ``&`` decides which ordering survives.
    """
    keep_subset_order = order == followorder
    if keep_subset_order:
        return subset & x
    return x & subset
151 151
152 152
def rangeset(repo, subset, x, y, order):
    """Handle the 'x:y' range operator."""
    left = getset(repo, fullreposet(repo), x)
    right = getset(repo, fullreposet(repo), y)
    # an empty endpoint makes the whole range empty
    if left and right:
        return _makerangeset(repo, subset, left.first(), right.last(), order)
    return baseset()
160 160
161 161
def rangeall(repo, subset, x, order):
    """Handle the bare ':' operator: the full span from 0 to tip."""
    assert x is None
    tip = repo.changelog.tiprev()
    return _makerangeset(repo, subset, 0, tip, order)
165 165
166 166
def rangepre(repo, subset, y, order):
    """Handle ':y'."""
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    ends = getset(repo, fullreposet(repo), y)
    if not ends:
        return baseset()
    return _makerangeset(repo, subset, 0, ends.last(), order)
173 173
174 174
def rangepost(repo, subset, x, order):
    """Handle 'x:' — from x to the repository tip."""
    starts = getset(repo, fullreposet(repo), x)
    if not starts:
        return baseset()
    tip = repo.changelog.tiprev()
    return _makerangeset(repo, subset, starts.first(), tip, order)
182 182
183 183
def _makerangeset(repo, subset, m, n, order):
    # Build the revision span m..n (inclusive, possibly descending) and
    # combine it with subset per the requested ordering.
    if m == n:
        r = baseset([m])
    elif n == wdirrev:
        # spanset cannot hold the virtual working-directory revision;
        # append it explicitly after the concrete ascending span
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == wdirrev:
        # descending range starting at wdir: wdir first, then tip down to n
        r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
    elif m < n:
        # ascending span; the end bound is exclusive, hence n + 1
        r = spanset(repo, m, n + 1)
    else:
        # descending span; the low bound is exclusive, hence n - 1
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r
201 201
202 202
def dagrange(repo, subset, x, y, order):
    """Handle the 'x::y' DAG range operator."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    reachable = dagop.reachableroots(repo, roots, heads, includepath=True)
    return subset & reachable
209 209
210 210
def andset(repo, subset, x, y, order):
    """Handle 'x and y': evaluate y inside the result of x.

    y must follow x's ordering unless the caller does not care at all.
    """
    yorder = anyorder if order == anyorder else followorder
    left = getset(repo, subset, x, order)
    return getset(repo, left, y, yorder)
217 217
218 218
def andsmallyset(repo, subset, x, y, order):
    # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is
    # small: evaluate y first so x is evaluated against a smaller subset
    yorder = anyorder if order == anyorder else followorder
    right = getset(repo, subset, y, yorder)
    return getset(repo, right, x, order)
226 226
227 227
def differenceset(repo, subset, x, y, order):
    """Handle 'x - y': members of x not in y (y's order is irrelevant)."""
    left = getset(repo, subset, x, order)
    right = getset(repo, subset, y, anyorder)
    return left - right
230 230
231 231
def _orsetlist(repo, subset, xs, order):
    """Union a non-empty list of parsed trees, divide-and-conquer style."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0], order)
    mid = len(xs) // 2
    left = _orsetlist(repo, subset, xs[:mid], order)
    right = _orsetlist(repo, subset, xs[mid:], order)
    return left + right
240 240
241 241
def orset(repo, subset, x, order):
    """Handle 'x or y or ...'."""
    trees = getlist(x)
    if not trees:
        return baseset()
    if order != followorder:
        return _orsetlist(repo, subset, trees, order)
    # slow path to take the subset order
    return subset & _orsetlist(repo, fullreposet(repo), trees, anyorder)
251 251
252 252
def notset(repo, subset, x, order):
    """Handle 'not x' relative to subset."""
    excluded = getset(repo, subset, x, anyorder)
    return subset - excluded
255 255
256 256
def relationset(repo, subset, x, y, order):
    # this is pretty basic implementation of 'x#y' operator, still
    # experimental so undocumented. see the wiki for further ideas.
    # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
    rel = getsymbol(y)
    if rel not in relations:
        # suggest only multi-character relation names
        relnames = [r for r in relations.keys() if len(r) > 1]
        raise error.UnknownIdentifier(rel, relnames)
    return relations[rel](repo, subset, x, rel, order)
267 267
268 268
269 269 def _splitrange(a, b):
270 270 """Split range with bounds a and b into two ranges at 0 and return two
271 271 tuples of numbers for use as startdepth and stopdepth arguments of
272 272 revancestors and revdescendants.
273 273
274 274 >>> _splitrange(-10, -5) # [-10:-5]
275 275 ((5, 11), (None, None))
276 276 >>> _splitrange(5, 10) # [5:10]
277 277 ((None, None), (5, 11))
278 278 >>> _splitrange(-10, 10) # [-10:10]
279 279 ((0, 11), (0, 11))
280 280 >>> _splitrange(-10, 0) # [-10:0]
281 281 ((0, 11), (None, None))
282 282 >>> _splitrange(0, 10) # [0:10]
283 283 ((None, None), (0, 11))
284 284 >>> _splitrange(0, 0) # [0:0]
285 285 ((0, 1), (None, None))
286 286 >>> _splitrange(1, -1) # [1:-1]
287 287 ((None, None), (None, None))
288 288 """
289 289 ancdepths = (None, None)
290 290 descdepths = (None, None)
291 291 if a == b == 0:
292 292 ancdepths = (0, 1)
293 293 if a < 0:
294 294 ancdepths = (-min(b, 0), -a + 1)
295 295 if b > 0:
296 296 descdepths = (max(a, 0), b + 1)
297 297 return ancdepths, descdepths
298 298
299 299
def generationsrel(repo, subset, x, rel, order):
    # 'x#generations' without a subscript is 'x#generations[:]':
    # delegate with the full default depth range
    full_range = (b'rangeall', None)
    return generationssubrel(repo, subset, x, rel, full_range, order)
303 303
304 304
def generationssubrel(repo, subset, x, rel, z, order):
    # Handle 'x#generations[a:b]': ancestors/descendants of x within the
    # generation-depth window [a, b].
    # TODO: rewrite tests, and drop startdepth argument from ancestors() and
    # descendants() predicates
    a, b = getintrange(
        z,
        _(b'relation subscript must be an integer or a range'),
        _(b'relation subscript bounds must be integers'),
        deffirst=-(dagop.maxlogdepth - 1),
        deflast=+(dagop.maxlogdepth - 1),
    )
    # negative depths become ancestor bounds, positive ones descendant bounds
    (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)

    if ancstart is None and descstart is None:
        # empty window (e.g. [1:-1]): nothing can match
        return baseset()

    revs = getset(repo, fullreposet(repo), x)
    if not revs:
        return baseset()

    # at least one of the two sides is set here (checked above)
    if ancstart is not None and descstart is not None:
        s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
        s += dagop.revdescendants(repo, revs, False, descstart, descstop)
    elif ancstart is not None:
        s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
    elif descstart is not None:
        s = dagop.revdescendants(repo, revs, False, descstart, descstop)

    return subset & s
333 333
334 334
def relsubscriptset(repo, subset, x, y, z, order):
    # this is pretty basic implementation of 'x#y[z]' operator, still
    # experimental so undocumented. see the wiki for further ideas.
    # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
    rel = getsymbol(y)
    if rel not in subscriptrelations:
        # suggest only multi-character relation names
        relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
        raise error.UnknownIdentifier(rel, relnames)
    return subscriptrelations[rel](repo, subset, x, rel, z, order)
345 345
346 346
def subscriptset(repo, subset, x, y, order):
    # 'x[n]' is only meaningful inside a relation ('x#rel[n]'); reject it
    # anywhere else
    raise error.ParseError(_(b"can't use a subscript in this context"))
349 349
350 350
def listset(repo, subset, *xs, **opts):
    # a bare 'x, y' list is not a valid revset expression; point the user
    # at 'x or y' instead
    raise error.ParseError(
        _(b"can't use a list in this context"),
        hint=_(b'see \'hg help "revsets.x or y"\''),
    )
356 356
357 357
def keyvaluepair(repo, subset, k, v, order):
    # 'k=v' pairs are only valid as keyword arguments of a function call
    raise error.ParseError(_(b"can't use a key-value pair in this context"))
360 360
361 361
def func(repo, subset, a, b, order):
    """Dispatch a parsed function-call node to the matching predicate."""
    name = getsymbol(a)
    if name in symbols:
        fn = symbols[name]
        # some predicates opt into receiving the ordering requirement
        if getattr(fn, '_takeorder', False):
            return fn(repo, subset, b, order)
        return fn(repo, subset, b)

    # only suggest documented predicates (docstring-less ones are
    # presumably internal/unadvertised)
    documented = [
        s for (s, fn) in symbols.items() if getattr(fn, '__doc__', None) is not None
    ]
    raise error.UnknownIdentifier(name, documented)
374 374
375 375
376 376 # functions
377 377
378 378 # symbols are callables like:
379 379 # fn(repo, subset, x)
380 380 # with:
381 381 # repo - current repository instance
382 382 # subset - of revisions to be examined
383 383 # x - argument in tree form
384 384 symbols = revsetlang.symbols
385 385
386 386 # symbols which can't be used for a DoS attack for any given input
387 387 # (e.g. those which accept regexes as plain strings shouldn't be included)
388 388 # functions that just return a lot of changesets (like all) don't count here
389 389 safesymbols = set()
390 390
391 391 predicate = registrar.revsetpredicate()
392 392
393 393
@predicate(b'_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, b'limit', b'clean')
    dest = destutil.destupdate(repo, **pycompat.strkwargs(args))[0]
    return subset & baseset([dest])
401 401
402 402
@predicate(b'_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None if x is None else getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
410 410
411 411
@predicate(b'adds(pattern)', safe=True, weight=30)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _(b"adds requires a pattern"))
    # delegate to the shared status helper, asking for the 'added' field
    return checkstatus(repo, subset, pattern, 'added')
423 423
424 424
@predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    revs = orset(repo, fullreposet(repo), x, order=anyorder)
    it = iter(revs)
    try:
        first = next(it)
    except StopIteration:
        # no arguments / empty set: no ancestor
        return baseset()
    # fold the remaining revisions pairwise into the common ancestor
    anc = repo[first]
    for rev in it:
        anc = anc.ancestor(repo[rev])

    ancrev = scmutil.intrev(anc)
    if ancrev in subset:
        return baseset([ancrev])
    return baseset()
445 445
446 446
def _ancestors(
    repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
):
    """Shared implementation behind ancestors()/_firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
    return subset & ancs
455 455
456 456
@predicate(b'ancestors(set[, depth])', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, b'ancestors', b'set depth startdepth')
    if b'set' not in args:
        # i18n: "ancestors" is a keyword
        raise error.ParseError(_(b'ancestors takes at least 1 argument'))
    startdepth = stopdepth = None
    if b'startdepth' in args:
        n = getinteger(
            args[b'startdepth'], b"ancestors expects an integer startdepth"
        )
        if n < 0:
            raise error.ParseError(b"negative startdepth")
        startdepth = n
    if b'depth' in args:
        # i18n: "ancestors" is a keyword
        n = getinteger(args[b'depth'], _(b"ancestors expects an integer depth"))
        if n < 0:
            raise error.ParseError(_(b"negative depth"))
        # depth N means "up to the Nth generation"; dagop takes an
        # exclusive stop bound, hence N + 1
        stopdepth = n + 1
    return _ancestors(
        repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
    )
487 487
488 488
@predicate(b'_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # NOTE: comments rather than a docstring, presumably to keep this
    # internal predicate out of user-facing suggestions/help
    return _ancestors(repo, subset, x, followfirst=True)
494 494
495 495
def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                # ran out of children before the Nth generation: drop root
                break
            if len(c) > 1:
                # ambiguous walk: 'set~-n' only works along a linear run
                raise error.RepoLookupError(
                    _(b"revision in set has more than one child")
                )
            r = c[0].rev()
        else:
            # walked n generations without breaking: r is the Nth child
            cs.add(r)
    return subset & cs
514 514
515 515
def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _(b"~ expects a number"))
    if n < 0:
        # negative n walks the other way: children lookup
        return _childrenspec(repo, subset, x, -n, order)
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            try:
                r = cl.parentrevs(r)[0]
            except error.WdirUnsupported:
                # working directory has no changelog entry; ask the context
                r = repo[r].p1().rev()
        ps.add(r)
    return subset & ps
535 535
536 536
@predicate(b'author(string)', safe=True, weight=10)
def author(repo, subset, x):
    """Alias for ``user(string)``."""
    # i18n: "author" is a keyword
    needle = getstring(x, _(b"author requires a string"))
    # case-insensitive substring/pattern match against the user field
    kind, pattern, matcher = _substringmatcher(needle, casesensitive=False)
    return subset.filter(
        lambda r: matcher(repo[r].user()), condrepr=(b'<user %r>', needle)
    )
546 546
547 547
@predicate(b'bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _(b"bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return subset & marked
564 564
565 565
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate(b'bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated alias: delegates directly to bisect()
    return bisect(repo, subset, x)
571 571
572 572
@predicate(b'bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _(b'bookmark takes one or no arguments'))
    if args:
        bm = getstring(
            args[0],
            # i18n: "bookmark" is a keyword
            _(b'the argument to bookmark must be a string'),
        )
        kind, pattern, matcher = stringutil.stringmatcher(bm)
        bms = set()
        if kind == b'literal':
            # plain name: resolve exactly (after name expansion) and fail
            # loudly if the bookmark does not exist
            if bm == pattern:
                pattern = repo._bookmarks.expandname(pattern)
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(
                    _(b"bookmark '%s' does not exist") % pattern
                )
            bms.add(repo[bmrev].rev())
        else:
            # pattern kind (re:, glob:, ...): collect every matching
            # bookmark; no match is simply an empty result, not an error
            matchrevs = set()
            for name, bmrev in repo._bookmarks.items():
                if matcher(name):
                    matchrevs.add(bmrev)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
        bms -= {nullrev}
    return subset & bms
609 609
610 610
@predicate(b'branch(string or set)', safe=True, weight=10)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo

    def getbranch(r):
        # branch name of revision r; the working-directory revision is not
        # in the branch cache, so fall back to the changectx
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            return repo[r].branch()

    try:
        b = getstring(x, b'')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = stringutil.stringmatcher(b)
        if kind == b'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if repo.branchmap().hasbranch(pattern):
                return subset.filter(
                    lambda r: matcher(getbranch(r)),
                    condrepr=(b'<branch %r>', b),
                )
            if b.startswith(b'literal:'):
                # explicit 'literal:' prefix: unknown branch is an error
                raise error.RepoLookupError(
                    _(b"branch '%s' does not exist") % pattern
                )
        else:
            return subset.filter(
                lambda r: matcher(getbranch(r)), condrepr=(b'<branch %r>', b)
            )

    # revset argument: select every revision on any branch that a revision
    # of the argument set belongs to
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(
        lambda r: c(r) or getbranch(r) in b,
        condrepr=lambda: b'<branch %r>' % _sortedb(b),
    )
661 661
662 662
@predicate(b'phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    (EXPERIMENTAL)
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _(b"phasedivergent takes no arguments"))
    divergent = obsmod.getrevs(repo, b'phasedivergent')
    return subset & divergent
674 674
675 675
@predicate(b'bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""
    # only a bundle repository's changelog carries 'bundlerevs'
    bundlerevs = getattr(repo.changelog, 'bundlerevs', None)
    if bundlerevs is None:
        raise error.Abort(_(b"no bundle provided - specify with -R"))
    return subset & bundlerevs
687 687
688 688
def checkstatus(repo, subset, pat, field):
    """Helper for status-related revsets (adds, removes, modifies).
    The field parameter says which kind is desired.
    """
    # fileset patterns depend on the changectx, so the matcher below must
    # be rebuilt per revision in that case
    hasset = matchmod.patkind(pat) == b'set'

    # one-element list used as a mutable cell to cache the matcher across
    # calls of the closure
    mcache = [None]

    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None

        assert m is not None  # help pytype
        # a single literal file lets us skip the matcher and compare names
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        # cheap pre-filter: the changeset must touch a matching file at all
        # before we compute the (expensive) status against its parent
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            if not any(m(f) for f in c.files()):
                return False
        files = getattr(repo.status(c.p1().node(), c.node()), field)
        if fname is not None:
            if fname in files:
                return True
        else:
            if any(m(f) for f in files):
                return True
        # implicit None (falsy) when no file of the requested kind matches

    return subset.filter(
        matches, condrepr=(b'<status.%s %r>', pycompat.sysbytes(field), pat)
    )
724 724
725 725
def _children(repo, subset, parentset):
    """Return members of *subset* having a parent in *parentset*."""
    if not parentset:
        return baseset()
    result = set()
    parentrevs = repo.changelog.parentrevs
    # a child is always numbered above its parents; skip anything at or
    # below the smallest candidate parent
    floor = parentset.min()
    for rev in subset:
        if rev <= floor:
            continue
        p1, p2 = parentrevs(rev)
        if p1 in parentset or (p2 != nullrev and p2 in parentset):
            result.add(rev)
    return baseset(result)
741 741
742 742
@predicate(b'children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set."""
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
749 749
750 750
@predicate(b'closed()', safe=True, weight=10)
def closed(repo, subset, x):
    """Changeset is closed."""
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _(b"closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed, condrepr=b'<branch closed>')
759 759
760 760
# for internal use
@predicate(b'_commonancestorheads(set)', safe=True)
def _commonancestorheads(repo, subset, x):
    # Internal predicate: quickly compute "heads(::x and ::y)".
    # These greatest common ancestors are the same ones that the consensus
    # bid merge will find.
    startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
    headrevs = repo.changelog._commonancestorsheads(*list(startrevs))
    return subset & baseset(headrevs)
773 773
774 774
@predicate(b'commonancestors(set)', safe=True)
def commonancestors(repo, subset, x):
    """Changesets that are ancestors of every changeset in set."""
    startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
    if not startrevs:
        return baseset()
    # intersect the ancestor sets of each start revision
    result = subset
    for rev in startrevs:
        result &= dagop.revancestors(repo, baseset([rev]))
    return result
784 784
785 785
@predicate(b'conflictlocal()', safe=True)
def conflictlocal(repo, subset, x):
    """The local side of the merge, if currently in an unresolved merge.

    "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
    """
    getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
    # imported here to avoid a module-level import cycle
    from . import mergestate as mergestatemod

    ms = mergestatemod.mergestate.read(repo)
    if not ms.active():
        return baseset()
    node = ms.local
    if not repo.changelog.hasnode(node):
        return baseset()
    return subset & {repo.changelog.rev(node)}
800 800
801 801
@predicate(b'conflictother()', safe=True)
def conflictother(repo, subset, x):
    """The other side of the merge, if currently in an unresolved merge.

    "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
    """
    getargs(x, 0, 0, _(b"conflictother takes no arguments"))
    # imported here to avoid a module-level import cycle
    from . import mergestate as mergestatemod

    ms = mergestatemod.mergestate.read(repo)
    if not ms.active():
        return baseset()
    node = ms.other
    if not repo.changelog.hasnode(node):
        return baseset()
    return subset & {repo.changelog.rev(node)}
816 816
817 817
@predicate(b'contains(pattern)', weight=100)
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _(b"contains requires a pattern"))
    # a bare path (no glob:/re:/... kind) allows an exact manifest lookup
    plainpath = not matchmod.patkind(pat)

    def matches(rev):
        ctx = repo[rev]
        if plainpath:
            return pathutil.canonpath(repo.root, repo.getcwd(), pat) in ctx
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        return any(m(f) for f in ctx.manifest())

    return subset.filter(matches, condrepr=(b'<contains %r>', pat))
844 844
845 845
@predicate(b'converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _(b'converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _(b'converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get(b'convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue, condrepr=(b'<converted %r>', rev))
869 869
870 870
@predicate(b'date(interval)', safe=True, weight=10)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`."""
    # i18n: "date" is a keyword
    ds = getstring(x, _(b"date requires a string"))
    matchfn = dateutil.matchdate(ds)

    def indate(rev):
        # date() returns (unixtime, tzoffset); only the timestamp matters
        return matchfn(repo[rev].date()[0])

    return subset.filter(indate, condrepr=(b'<date %r>', ds))
880 880
881 881
@predicate(b'desc(string)', safe=True, weight=10)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _(b"desc requires a string"))
    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    def indesc(rev):
        return matcher(repo[rev].description())

    return subset.filter(indesc, condrepr=(b'<desc %r>', ds))
897 897
898 898
def _descendants(
    repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
):
    """Shared implementation behind descendants()/_firstdescendants()."""
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    descs = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
    return subset & descs
907 907
908 908
@predicate(b'descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, b'descendants', b'set depth startdepth')
    if b'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_(b'descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if b'startdepth' in args:
        n = getinteger(
            args[b'startdepth'], b"descendants expects an integer startdepth"
        )
        if n < 0:
            raise error.ParseError(b"negative startdepth")
        startdepth = n
    if b'depth' in args:
        # i18n: "descendants" is a keyword
        n = getinteger(
            args[b'depth'], _(b"descendants expects an integer depth")
        )
        if n < 0:
            raise error.ParseError(_(b"negative depth"))
        # depth N means "up to the Nth generation"; dagop takes an
        # exclusive stop bound, hence N + 1
        stopdepth = n + 1
    return _descendants(
        repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
    )
941 941
942 942
@predicate(b'_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (internal predicate: comments instead of a docstring keep it out of
    # the user-visible revset help)
    return _descendants(repo, subset, x, followfirst=True)
948 948
949 949
@predicate(b'destination([set])', safe=True, weight=10)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is None:
        sources = fullreposet(repo)
    else:
        sources = getset(repo, fullreposet(repo), x)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for candidate in subset:
        rev = candidate
        src = _getrevsource(repo, rev)
        lineage = []

        while src is not None:
            lineage.append(rev)

            # The visited lineage is a match if the current source is in the
            # arg set. Since every candidate dest is visited by way of
            # iterating subset, any dests further back in the lineage will be
            # tested by a different iteration over subset. Likewise, if the
            # src was already selected, the current lineage can be selected
            # without going back further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            rev = src
            src = _getrevsource(repo, rev)

    return subset.filter(
        dests.__contains__,
        condrepr=lambda: b'<destination %r>' % _sortedb(dests),
    )
996 996
997 997
@predicate(b'diffcontains(pattern)', weight=110)
def diffcontains(repo, subset, x):
    """Search revision differences for when the pattern was added or removed.

    The pattern may be a substring literal or a regular expression. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, b'diffcontains', b'pattern')
    if b'pattern' not in args:
        # i18n: "diffcontains" is a keyword
        raise error.ParseError(_(b'diffcontains takes at least 1 argument'))

    pattern = getstring(
        args[b'pattern'], _(b'diffcontains requires a string pattern')
    )
    regexp = stringutil.substringregexp(pattern, re.M)

    # TODO: add support for file pattern and --follow. For example,
    # diffcontains(pattern[, set]) where set may be file(pattern) or
    # follow(pattern), and we'll eventually add a support for narrowing
    # files by revset?
    fmatch = matchmod.always()

    def makefilematcher(ctx):
        # every revision is searched with the same match-everything matcher
        return fmatch

    # TODO: search in a windowed way
    searcher = grepmod.grepsearcher(repo.ui, repo, regexp, diff=True)

    def testdiff(rev):
        # consume the generator to discard revfiles/matches cache
        found = False
        # NOTE: no early break — the loop runs to completion on purpose so
        # the searcher's per-revision caches are fully drained (see above)
        for fn, ctx, pstates, states in searcher.searchfiles(
            baseset([rev]), makefilematcher
        ):
            if next(grepmod.difflinestates(pstates, states), None):
                found = True
        return found

    return subset.filter(testdiff, condrepr=(b'<diffcontains %r>', pattern))
1038 1038
1039 1039
@predicate(b'contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final
    successors. (EXPERIMENTAL)
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _(b"contentdivergent takes no arguments"))
    # obsolescence store keeps a precomputed set of divergent revisions
    return subset & obsmod.getrevs(repo, b'contentdivergent')
1050 1050
1051 1051
@predicate(b'expectsize(set[, size])', safe=True, takeorder=True)
def expectsize(repo, subset, x, order):
    """Return the given revset if size matches the revset size.
    Abort if the revset doesn't expect given size.
    size can either be an integer range or an integer.

    For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
    2 is not between 3 and 5 inclusive."""

    args = getargsdict(x, b'expectsize', b'set size')
    if b'size' not in args or b'set' not in args:
        raise error.ParseError(_(b'invalid set of arguments'))
    # an open-ended range defaults to [0, len(repo) + 1]
    minsize, maxsize = getintrange(
        args[b'size'],
        _(b'expectsize requires a size range or a positive integer'),
        _(b'size range bounds must be integers'),
        0,
        len(repo) + 1,
    )
    if minsize < 0 or maxsize < 0:
        raise error.ParseError(_(b'negative size'))
    rev = getset(repo, fullreposet(repo), args[b'set'], order=order)
    actual = len(rev)
    if minsize == maxsize:
        # exact-size form: expectsize(set, N)
        if actual != minsize:
            msg = _(b'revset size mismatch. expected %d, got %d')
            raise error.RepoLookupError(msg % (minsize, actual))
    elif actual < minsize or actual > maxsize:
        # range form: expectsize(set, MIN:MAX)
        msg = _(b'revset size mismatch. expected between %d and %d, got %d')
        raise error.RepoLookupError(msg % (minsize, maxsize, actual))
    if order == followorder:
        return subset & rev
    return rev & subset
1094 1094
1095 1095
@predicate(b'extdata(source)', safe=False, weight=100)
def extdata(repo, subset, x):
    """Changesets in the specified extdata source. (EXPERIMENTAL)"""
    # i18n: "extdata" is a keyword
    args = getargsdict(x, b'extdata', b'source')
    source = getstring(
        args.get(b'source'),
        # i18n: "extdata" is a keyword
        _(b'extdata takes at least 1 string argument'),
    )
    # resolve the named source to its revision list and intersect
    return subset & baseset(scmutil.extdatasource(repo, source))
1108 1108
1109 1109
@predicate(b'extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL)"""
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _(b"extinct takes no arguments"))
    # the obsolescence store precomputes the extinct set
    return subset & obsmod.getrevs(repo, b'extinct')
1117 1117
1118 1118
@predicate(b'extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, b'extra', b'label value')
    if b'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_(b'extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(
        args[b'label'], _(b'first argument to extra must be a string')
    )
    value = None

    if b'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(
            args[b'value'], _(b'second argument to extra must be a string')
        )
        kind, value, matcher = stringutil.stringmatcher(value)

    def _matchvalue(r):
        # a missing label never matches; with no value pattern, the mere
        # presence of the label is enough
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    # pass the predicate directly: wrapping it in `lambda r: _matchvalue(r)`
    # was a redundant indirection
    return subset.filter(
        _matchvalue, condrepr=(b'<extra[%r] %r>', label, value)
    )
1151 1151
1152 1152
@predicate(b'filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _(b"filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: canonicalize it to a single repo-relative file name
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match lazily against the working-directory contents
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: file node -> changelog rev already resolved by the slow scan
        known = {}
        # scanpos: changelog position the slow scan has reached; None while a
        # scan is in progress (prevents overlapping scans)
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
1215 1215
1216 1216
@predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0)
def first(repo, subset, x, order):
    """An alias for limit()."""
    # thin delegate: argument parsing and ordering all happen in limit()
    return limit(repo, subset, x, order)
1221 1221
1222 1222
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of ``follow()`` and ``_followfirst()``.

    Intersects ``subset`` with the ancestors of the start revisions, or —
    when a file pattern is given — with the ancestors of the matched file
    revisions.  ``name`` is the predicate name, used only in error messages.
    """
    args = getargsdict(x, name, b'file startrev')
    revs = None
    if b'startrev' in args:
        revs = getset(repo, fullreposet(repo), args[b'startrev'])
    if b'file' in args:
        x = getstring(args[b'file'], _(b"%s expected a pattern") % name)
        if revs is None:
            # None stands for the working directory here
            revs = [None]
        fctxs = []
        for r in revs:
            ctx = mctx = repo[r]
            if r is None:
                # match against wdir, but take file contexts from '.'
                ctx = repo[b'.']
            m = matchmod.match(
                repo.root, repo.getcwd(), [x], ctx=mctx, default=b'path'
            )
            fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
        s = dagop.filerevancestors(fctxs, followfirst)
    else:
        if revs is None:
            # default start point: the working directory's first parent
            revs = baseset([repo[b'.'].rev()])
        s = dagop.revancestors(repo, revs, followfirst)

    return subset & s
1248 1248
1249 1249
@predicate(b'follow([file[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If file pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # thin wrapper: all of the work happens in _follow()
    return _follow(repo, subset, x, b'follow')
1258 1258
1259 1259
@predicate(b'_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``_followfirst([file[, startrev]])``
    # Like ``follow([file[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    # (internal predicate: comments instead of a docstring keep it out of
    # the user-visible revset help)
    return _follow(repo, subset, x, b'_followfirst', followfirst=True)
1266 1266
1267 1267
@predicate(
    b'followlines(file, fromline:toline[, startrev=., descend=False])',
    safe=True,
)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, b'followlines', b'file *lines startrev descend')
    if len(args[b'lines']) != 1:
        raise error.ParseError(_(b"followlines requires a line range"))

    rev = b'.'
    if b'startrev' in args:
        revs = getset(repo, fullreposet(repo), args[b'startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _(b"followlines expects exactly one revision")
            )
        rev = revs.last()

    pat = getstring(args[b'file'], _(b"followlines requires a pattern"))
    # i18n: "followlines" is a keyword
    msg = _(b"followlines expects exactly one file")
    fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
    fromline, toline = util.processlinerange(
        *getintrange(
            args[b'lines'][0],
            # i18n: "followlines" is a keyword
            _(b"followlines expects a line number or a range"),
            _(b"line range bounds must be integers"),
        )
    )

    fctx = repo[rev].filectx(fname)
    descend = False
    if b'descend' in args:
        descend = getboolean(
            args[b'descend'],
            # i18n: "descend" is a keyword
            _(b"descend argument must be a boolean"),
        )
    if descend:
        # block descendants are produced in ascending revision order
        rs = generatorset(
            (
                c.rev()
                for c, _linerange in dagop.blockdescendants(
                    fctx, fromline, toline
                )
            ),
            iterasc=True,
        )
    else:
        # block ancestors are produced newest-first, hence iterasc=False
        rs = generatorset(
            (
                c.rev()
                for c, _linerange in dagop.blockancestors(
                    fctx, fromline, toline
                )
            ),
            iterasc=False,
        )
    return subset & rs
1339 1339
1340 1340
@predicate(b'nodefromfile(path)')
def nodefromfile(repo, subset, x):
    """Read a list of nodes from the file at `path`.

    This applies `id(LINE)` to each line of the file.

    This is useful when the amount of nodes you need to specify gets too large
    for the command line.
    """
    path = getstring(x, _(b"nodefromfile require a file path"))
    listed_rev = set()
    try:
        with pycompat.open(path, 'rb') as fp:
            for raw_line in fp:
                # unresolvable lines are silently skipped, mirroring id()
                rev = _node(repo, raw_line.strip())
                if rev is not None:
                    listed_rev.add(rev)
    except IOError as exc:
        m = _(b'cannot open nodes file "%s": %s')
        m %= (path, encoding.strtolocal(exc.strerror))
        raise error.Abort(m)
    return subset & baseset(listed_rev)
1364 1364
1365 1365
@predicate(b'all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``."""
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _(b"all takes no arguments"))
    everything = spanset(repo)  # drop "null" if any
    return subset & everything
1372 1372
1373 1373
@predicate(b'grep(regex)', weight=10)
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _(b"grep requires a string")))
    except re.error as e:
        raise error.ParseError(
            _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
        )

    def matches(x):
        ctx = repo[x]
        # search changed file names, then the user and the description
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(pattern.search(field) for field in fields)

    return subset.filter(matches, condrepr=(b'<grep %r>', pattern.pattern))
1396 1396
1397 1397
@predicate(b'_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    # [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, b"_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    # dispatch each argument into its bucket based on the 2-byte prefix
    for arg in l:
        s = getstring(arg, b"_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == b'p:':
            pats.append(value)
        elif prefix == b'i:':
            inc.append(value)
        elif prefix == b'x:':
            exc.append(value)
        elif prefix == b'r:':
            if rev is not None:
                raise error.ParseError(
                    b'_matchfiles expected at most one revision'
                )
            if value == b'':  # empty means working directory
                rev = wdirrev
            else:
                rev = value
        elif prefix == b'd:':
            if default is not None:
                raise error.ParseError(
                    b'_matchfiles expected at most one default mode'
                )
            default = value
        else:
            raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = b'glob'
    # fileset patterns ('set:...') depend on the revision's context
    hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc)

    # one-element cache for the compiled matcher; rebuilt per revision only
    # when fileset patterns are present and no explicit 'r:' was given
    mcache = [None]

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles

    def matches(x):
        if x == wdirrev:
            # the changelog has no entry for the working directory, so ask
            # the context instead
            files = repo[x].files()
        else:
            files = getfiles(x)

        if not mcache[0] or (hasset and rev is None):
            r = x if rev is None else rev
            mcache[0] = matchmod.match(
                repo.root,
                repo.getcwd(),
                pats,
                include=inc,
                exclude=exc,
                ctx=repo[r],
                default=default,
            )
        m = mcache[0]

        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(
        matches,
        condrepr=(
            b'<matchfiles patterns=%r, include=%r '
            b'exclude=%r, default=%r, rev=%r>',
            pats,
            inc,
            exc,
            default,
            rev,
        ),
    )
1487 1487
1488 1488
@predicate(b'file(pattern)', safe=True, weight=10)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _(b"file requires a pattern"))
    # delegate to _matchfiles with a single plain ('p:') pattern argument
    return _matchfiles(repo, subset, (b'string', b'p:' + pattern))
1501 1501
1502 1502
@predicate(b'head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head."""
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _(b"head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    # collect the head revisions of every named branch
    for nodes in repo.branchmap().iterheads():
        for node in nodes:
            headrevs.add(cl.rev(node))
    return subset & baseset(headrevs)
1513 1513
1514 1514
@predicate(b'heads(set)', safe=True, takeorder=True)
def heads(repo, subset, x, order):
    """Members of set with no children in set."""
    # argument set should never define order
    if order == defineorder:
        order = followorder
    inputset = getset(repo, fullreposet(repo), x, order=order)
    wdirparents = None
    if wdirrev in inputset:
        # a bit slower, but not common so good enough for now
        wdirparents = [p.rev() for p in repo[None].parents()]
        # headrevs() cannot handle the virtual wdir revision: strip it out
        # and re-account for it afterwards
        inputset = set(inputset)
        inputset.discard(wdirrev)
    heads = repo.changelog.headrevs(inputset)
    if wdirparents is not None:
        # the wdir parents have a child (wdir itself), so they are not
        # heads; the wdir revision always is one
        heads.difference_update(wdirparents)
        heads.add(wdirrev)
    heads = baseset(heads)
    return subset & heads
1534 1534
1535 1535
@predicate(b'hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets."""
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _(b"hidden takes no arguments"))
    # everything filtered out of the "visible" view is hidden
    return subset & repoview.filterrevs(repo, b'visible')
1543 1543
1544 1544
@predicate(b'keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _(b"keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # changed file names, then the user and the description
        haystacks = ctx.files() + [ctx.user(), ctx.description()]
        return any(needle in encoding.lower(field) for field in haystacks)

    return subset.filter(matches, condrepr=(b'<keyword %r>', needle))
1564 1564
1565 1565
@predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset."""
    args = getargsdict(x, b'limit', b'set n offset')
    if b'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_(b"limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_(b"negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(
        args.get(b'offset'), _(b"limit expects a number"), default=0
    )
    if ofs < 0:
        raise error.ParseError(_(b"negative offset"))
    revs = getset(repo, fullreposet(repo), args[b'set'])
    window = revs.slice(ofs, ofs + lim)
    # with a single element, the order of the operands does not matter
    if order == followorder and lim > 1:
        return subset & window
    return window & subset
1588 1588
1589 1589
@predicate(b'last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1."""
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _(b"last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _(b"last expects a number"))
    if lim < 0:
        raise error.ParseError(_(b"negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    # reverse so that slicing the front takes the *last* lim members
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    # undo the reversal so the slice comes back in the set's own order
    ls.reverse()
    return ls & subset
1608 1608
1609 1609
@predicate(b'max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set."""
    candidates = getset(repo, fullreposet(repo), x)
    try:
        top = candidates.max()
    except ValueError:
        # an empty set has no maximum, mirroring builtin max()
        top = None
    if top is not None and top in subset:
        return baseset(
            [top], datarepr=(b'<max %r, %r>', subset, candidates)
        )
    return baseset(datarepr=(b'<max %r, %r>', subset, candidates))
1623 1623
1624 1624
@predicate(b'merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset."""
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _(b"merge takes no arguments"))
    cl = repo.changelog

    def ismerge(rev):
        # a merge has a real (non-null) second parent
        try:
            return cl.parentrevs(rev)[1] != nullrev
        except error.WdirUnsupported:
            # the changelog has no entry for wdir; ask the context instead
            return bool(repo[rev].p2())

    return subset.filter(ismerge, condrepr=b'<merge>')
1639 1639
1640 1640
@predicate(b'branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child."""
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _(b"branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[r - baserev] counts the children of revision r; only
    # revisions >= baserev can be members of subset, so older revisions
    # need no slot
    parentscount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(
        lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>'
    )
1660 1660
1661 1661
@predicate(b'min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set."""
    candidates = getset(repo, fullreposet(repo), x)
    try:
        bottom = candidates.min()
    except ValueError:
        # an empty set has no minimum, mirroring builtin min()
        bottom = None
    if bottom is not None and bottom in subset:
        return baseset(
            [bottom], datarepr=(b'<min %r, %r>', subset, candidates)
        )
    return baseset(datarepr=(b'<min %r, %r>', subset, candidates))
1675 1675
1676 1676
@predicate(b'modifies(pattern)', safe=True, weight=30)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _(b"modifies requires a pattern"))
    # checkstatus() does the per-revision status computation
    return checkstatus(repo, subset, pattern, 'modified')
1688 1688
1689 1689
@predicate(b'named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _(b'named requires a namespace argument'))

    ns = getstring(
        args[0],
        # i18n: "named" is a keyword
        _(b'the argument to named must be a string'),
    )
    kind, pattern, matcher = stringutil.stringmatcher(ns)
    namespaces = set()
    if kind == b'literal':
        # exact namespace name: it must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(
                _(b"namespace '%s' does not exist") % ns
            )
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        namespaces.update(
            namespace
            for name, namespace in repo.names.items()
            if matcher(name)
        )

    revs = set()
    for namespace in namespaces:
        for name in namespace.listnames(repo):
            if name in namespace.deprecated:
                continue
            revs.update(repo[n].rev() for n in namespace.nodes(repo, name))

    revs.discard(nullrev)
    return subset & revs
1726 1726
1727 1727
def _node(repo, n):
    """process a node input"""
    if len(n) == 2 * repo.nodeconstants.nodelen:
        # full-length hex nodeid: decode it and look it up directly
        try:
            return repo.changelog.rev(bin(n))
        except error.WdirUnsupported:
            return wdirrev
        except (binascii.Error, LookupError):
            return None
    # shorter input: treat it as an unambiguous nodeid prefix
    try:
        prefix_node = scmutil.resolvehexnodeidprefix(repo, n)
        if prefix_node is not None:
            return repo.changelog.rev(prefix_node)
    except LookupError:
        pass
    except error.WdirUnsupported:
        return wdirrev
    return None
1748 1748
1749 1749
@predicate(b'id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix."""
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _(b"id requires one argument"))
    # i18n: "id" is a keyword
    hexstr = getstring(args[0], _(b"id requires a string"))
    rev = _node(repo, hexstr)
    if rev is None:
        # unresolvable input yields the empty set rather than an error
        return baseset()
    return baseset([rev]) & subset
1763 1763
1764 1764
@predicate(b'none()', safe=True)
def none(repo, subset, x):
    """No changesets."""
    # i18n: "none" is a keyword
    getargs(x, 0, 0, _(b"none takes no arguments"))
    # validate the (empty) argument list, then return the empty set
    return baseset()
1771 1771
1772 1772
@predicate(b'obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version. (EXPERIMENTAL)"""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _(b"obsolete takes no arguments"))
    # the obsolescence store precomputes the obsolete set
    return subset & obsmod.getrevs(repo, b'obsolete')
1780 1780
1781 1781
@predicate(b'only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _(b'only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither in
        # the included set nor a descendant of it
        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [
            rev
            for rev in cl.headrevs()
            # idiomatic membership tests (was "not rev in ...")
            if rev not in descendants and rev not in include
        ]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1810 1810
1811 1811
@predicate(b'origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the recorded source chain back to its origin;
        # None when rev was not created by graft/transplant/rebase
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            older = _getrevsource(repo, src)
            if older is None:
                return src
            src = older

    origins = {_firstsrc(r) for r in dests}
    origins.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & origins
1843 1843
1844 1844
@predicate(b'outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.

    If the location resolve to multiple repositories, the union of all
    outgoing changeset will be used.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )

    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _(b"outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = (
        l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
    )
    if dest:
        dests = [dest]
    else:
        # no path given: let get_push_paths fall back to the configured
        # default push location(s)
        dests = []
    missing = set()
    for path in urlutil.get_push_paths(repo, repo.ui, dests):
        branches = path.branch, []

        revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
        if revs:
            # resolve symbolic revisions to nodes before discovery
            revs = [repo.lookup(rev) for rev in revs]
        other = hg.peer(repo, {}, path)
        try:
            # silence the remote handshake/discovery chatter
            with repo.ui.silent():
                outgoing = discovery.findcommonoutgoing(
                    repo, other, onlyheads=revs
                )
        finally:
            # always release the peer connection
            other.close()
        missing.update(outgoing.missing)
    cl = repo.changelog
    # convert the missing nodes back to local revision numbers
    o = {cl.rev(r) for r in missing}
    return subset & o
1888 1888
1889 1889
@predicate(b'p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory."""
    if x is None:
        # no argument: repo[None] is the working directory context
        wdp = repo[x].p1().rev()
        if wdp < 0:
            return baseset()
        return subset & baseset([wdp])

    firstparents = set()
    cl = repo.changelog
    for rev in getset(repo, fullreposet(repo), x):
        try:
            firstparents.add(cl.parentrevs(rev)[0])
        except error.WdirUnsupported:
            # the changelog cannot answer for the working directory
            firstparents.add(repo[rev].p1().rev())
    firstparents.discard(nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & firstparents
1910 1910
1911 1911
@predicate(b'p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory."""
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # working directory has only one parent
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # BUG FIX: store the revision number, not the context
                # object -- `ps` must hold only ints like the non-wdir
                # branch (compare parentspec(), which uses .rev() here)
                ps.add(parents[1].rev())
    ps -= {nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1938 1938
1939 1939
def parentpost(repo, subset, x, order):
    # postfix "set^" is the same as p1(set); `order` is accepted for the
    # dispatch signature but p1 does not take ordering into account
    return p1(repo, subset, x)
1942 1942
1943 1943
@predicate(b'parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # no argument: parents of the working directory
        allparents = {p.rev() for p in repo[x].parents()}
    else:
        allparents = set()
        parentrevs = repo.changelog.parentrevs
        for rev in getset(repo, fullreposet(repo), x):
            try:
                allparents.update(parentrevs(rev))
            except error.WdirUnsupported:
                # fall back to the context for the working directory rev
                allparents.update(p.rev() for p in repo[rev].parents())
    allparents.discard(nullrev)
    return subset & allparents
1963 1963
1964 1964
def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    # the phase cache answers set queries directly, restricted to subset
    return repo._phasecache.getrevset(repo, targets, subset)
1968 1968
1969 1969
@predicate(b'_phase(idx)', safe=True)
def phase(repo, subset, x):
    # (internal) changesets whose phase equals the given integer index
    args = getargs(x, 1, 1, b"_phase requires one argument")
    idx = getinteger(args[0], b"_phase expects a number")
    return _phase(repo, subset, idx)
1975 1975
1976 1976
@predicate(b'draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _(b"draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1984 1984
1985 1985
@predicate(b'secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _(b"secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1993 1993
1994 1994
@predicate(b'stack([revs])', safe=True)
def stack(repo, subset, x):
    """Experimental revset for the stack of changesets or working directory
    parent. (EXPERIMENTAL)
    """
    if x is None:
        # default to the stack of the working directory parent
        stacks = stackmod.getstack(repo)
    else:
        stacks = smartset.baseset([])
        for rev in getset(repo, fullreposet(repo), x):
            # union the stack of every requested revision
            stacks = stacks + stackmod.getstack(repo, rev)

    return subset & stacks
2009 2009
2010 2010
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # `n` is a parser token tuple; its payload (the digit) is at index 1
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_(b"^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # set^0 is the set itself
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                # working directory: ask the context for its first parent
                ps.add(repo[r].p1().rev())
        else:
            try:
                parents = cl.parentrevs(r)
                # only merges have a meaningful second parent
                if parents[1] != nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
2043 2043
2044 2044
@predicate(b'present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        # swallow lookup errors: unknown revisions select the empty set
        return baseset()
2058 2058
2059 2059
# for internal use
@predicate(b'_notpublic', safe=True)
def _notpublic(repo, subset, x):
    # (internal) every changeset that is not in the public phase
    getargs(x, 0, 0, b"_notpublic takes no arguments")
    # include ALL non-public phases (draft, secret, and any later-added
    # phase such as internal/archived), not just draft and secret
    return _phase(repo, subset, *phases.not_public_phases)
2065 2065
2066 2066
# for internal use
@predicate(b'_phaseandancestors(phasename, set)', safe=True)
def _phaseandancestors(repo, subset, x):
    # equivalent to (phasename() & ancestors(set)) but more efficient
    # phasename could be one of 'draft', 'secret', or '_notpublic'
    args = getargs(x, 2, 2, b"_phaseandancestors requires two arguments")
    phasename = getsymbol(args[0])
    s = getset(repo, fullreposet(repo), args[1])

    draft = phases.draft
    secret = phases.secret
    # minimal phase each name may contain; 'draft' maps to draft so the
    # ancestor walk still traverses secret ancestors (filtered out below)
    phasenamemap = {
        b'_notpublic': draft,
        b'draft': draft,  # follow secret's ancestors
        b'secret': secret,
    }
    if phasename not in phasenamemap:
        raise error.ParseError(b'%r is not a valid phasename' % phasename)

    minimalphase = phasenamemap[phasename]
    getphase = repo._phasecache.phase

    def cutfunc(rev):
        # stop the ancestor walk once we reach a more-public phase
        return getphase(repo, rev) < minimalphase

    revs = dagop.revancestors(repo, s, cutfunc=cutfunc)

    if phasename == b'draft':  # need to remove secret changesets
        revs = revs.filter(lambda r: getphase(repo, r) == draft)
    return subset & revs
2097 2097
2098 2098
@predicate(b'public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _(b"public takes no arguments"))
    target = phases.public
    return _phase(repo, subset, target)
2105 2105
2106 2106
@predicate(b'remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg  # avoid start-up nasties

    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _(b"remote takes zero, one, or two arguments"))

    q = b'.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _(b"remote requires a string id"))
    if q == b'.':
        # '.' means the current local branch name
        q = repo[b'.'].branch()

    dest = b''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _(b"remote requires a repository path"))
    if not dest:
        dest = b'default'
    path = urlutil.get_unique_pull_path_obj(b'remote', repo.ui, dest)

    other = hg.peer(repo, {}, path)
    try:
        n = other.lookup(q)
    finally:
        # close the peer connection even if the lookup fails
        # (matches the cleanup discipline of outgoing())
        other.close()
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
2141 2141
2142 2142
@predicate(b'removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _(b"removes requires a pattern"))
    # delegate to the shared status-based filter
    return checkstatus(repo, subset, pattern, 'removed')
2154 2154
2155 2155
@predicate(b'rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier."""
    try:
        return _rev(repo, subset, x)
    except error.RepoLookupError:
        # unlike _rev(), unknown revisions select nothing instead of failing
        return baseset()
2163 2163
2164 2164
@predicate(b'_rev(number)', safe=True)
def _rev(repo, subset, x):
    # internal version of "rev(x)" that raise error if "x" is invalid
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _(b"rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(args[0], _(b"rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_(b"rev expects a number"))
    if num not in _virtualrevs:
        # make sure the revision actually exists in the changelog
        try:
            repo.changelog.node(num)
        except IndexError:
            raise error.RepoLookupError(_(b"unknown revision '%d'") % num)
    return subset & baseset([num])
2182 2182
2183 2183
@predicate(b'revset(set)', safe=True, takeorder=True)
def revsetpredicate(repo, subset, x, order):
    """Strictly interpret the content as a revset.

    The content of this special predicate will be strictly interpreted as a
    revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
    without possible ambiguity with a "id(0)" bookmark or tag.
    """
    # parsing already resolved the inner tree; simply evaluate it
    return getset(repo, subset, x, order)
2193 2193
2194 2194
@predicate(b'matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _(b"matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = [b'metadata']
    if len(l) > 1:
        fieldlist = getstring(
            l[1],
            # i18n: "matching" is a keyword
            _(b"matching requires a string as its second argument"),
        ).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == b'metadata':
            fields += [b'user', b'description', b'date']
        elif field == b'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += [b'files', b'diff']
        else:
            if field == b'author':
                field = b'user'
            fields.append(field)
    fields = set(fields)
    if b'summary' in fields and b'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard(b'summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = [
        b'phase',
        b'parents',
        b'user',
        b'date',
        b'branch',
        b'summary',
        b'files',
        b'description',
        b'substate',
        b'diff',
    ]

    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)

    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        b'user': lambda r: repo[r].user(),
        b'branch': lambda r: repo[r].branch(),
        b'date': lambda r: repo[r].date(),
        b'description': lambda r: repo[r].description(),
        b'files': lambda r: repo[r].files(),
        b'parents': lambda r: repo[r].parents(),
        b'phase': lambda r: repo[r].phase(),
        b'substate': lambda r: repo[r].substate,
        b'summary': lambda r: repo[r].description().splitlines()[0],
        b'diff': lambda r: list(
            repo[r].diff(opts=diffutil.diffallopts(repo.ui, {b'git': True}))
        ),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _(b"unexpected field name passed to matching: %s")
                % info
            )
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # stop early on the first mismatch: fields are ordered
                    # cheapest-first, so this skips the remaining (possibly
                    # very costly, e.g. 'diff') field comparisons
                    break
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs))
2323 2323
2324 2324
@predicate(b'reverse(set)', safe=True, takeorder=True, weight=0)
def reverse(repo, subset, x, order):
    """Reverse order of set."""
    result = getset(repo, subset, x, order)
    # only reverse when we define the ordering ourselves; otherwise the
    # caller's imposed order must be preserved
    if order == defineorder:
        result.reverse()
    return result
2332 2332
2333 2333
@predicate(b'roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set."""
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs

    def isroot(r):
        # a root has no parent inside s
        try:
            for p in parentrevs(r):
                if 0 <= p and p in s:
                    return False
        except error.WdirUnsupported:
            # working directory: query its parents via the context
            for p in repo[None].parents():
                if p.rev() in s:
                    return False
        return True

    return subset & s.filter(isroot, condrepr=b'<roots>')
2352 2352
2353 2353
# 32-bit signed integer bounds, used as the value range for random sort keys
MAXINT = (1 << 31) - 1
MININT = -MAXINT - 1
2356 2356
2357 2357
def pick_random(c, gen=random):
    # exists as its own function to make it possible to overwrite the seed
    # `c` (the item being sorted) is deliberately ignored: each item gets an
    # independent random key, which shuffles the set
    return gen.randint(MININT, MAXINT)
2361 2361
2362 2362
# mapping: sort key name -> function extracting that key from a changectx
_sortkeyfuncs = {
    b'rev': scmutil.intrev,
    b'branch': lambda c: c.branch(),
    b'desc': lambda c: c.description(),
    b'user': lambda c: c.user(),
    b'author': lambda c: c.user(),
    b'date': lambda c: c.date()[0],
    b'node': scmutil.binnode,
    b'random': pick_random,
}
2373 2373
2374 2374
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(
        x,
        b'sort',
        b'set keys topo.firstbranch random.seed',
    )
    if b'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_(b'sort requires one or two arguments'))
    # default sort key: revision number, ascending
    keys = b"rev"
    if b'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args[b'keys'], _(b"sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        # a leading '-' requests a descending sort for that key
        reverse = k.startswith(b'-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != b'topo':
            raise error.ParseError(
                _(b"unknown sort key %r") % pycompat.bytestr(fk)
            )
        keyflags.append((k, reverse))

    # 'topo' cannot be meaningfully combined with other keys
    if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(
            _(b'topo sort order cannot be combined with other sort keys')
        )

    opts = {}
    if b'topo.firstbranch' in args:
        if any(k == b'topo' for k, reverse in keyflags):
            opts[b'topo.firstbranch'] = args[b'topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(
                _(
                    b'topo.firstbranch can only be used '
                    b'when using the topo sort key'
                )
            )

    if b'random.seed' in args:
        if any(k == b'random' for k, reverse in keyflags):
            s = args[b'random.seed']
            seed = getstring(s, _(b"random.seed must be a string"))
            opts[b'random.seed'] = seed
        else:
            # i18n: "random" and "random.seed" are keywords
            raise error.ParseError(
                _(
                    b'random.seed can only be used '
                    b'when using the random sort key'
                )
            )

    return args[b'set'], keyflags, opts
2436 2436
2437 2437
@predicate(
    b'sort(set[, [-]key... [, ...]])', safe=True, takeorder=True, weight=10
)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort
    - ``node`` the nodeid of the revision
    - ``random`` randomly shuffle revisions

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    The ``random`` sort takes one optional ``random.seed`` argument to control
    the pseudo-randomness of the result.
    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    # nothing to sort, or the caller imposes its own ordering: return as-is
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == b"rev":
        # fast path: sorting by revision number only
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == b"topo":
        firstbranch = ()
        parentrevs = repo.changelog.parentrevs
        parentsfunc = parentrevs
        if wdirrev in revs:
            # the changelog cannot answer for the working-directory rev;
            # fall back to the context's parents for it

            def parentsfunc(r):
                try:
                    return parentrevs(r)
                except error.WdirUnsupported:
                    return [p.rev() for p in repo[None].parents()]

        if b'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts[b'topo.firstbranch'])
        revs = baseset(
            dagop.toposort(revs, parentsfunc, firstbranch),
            istopo=True,
        )
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        func = _sortkeyfuncs[k]
        if k == b'random' and b'random.seed' in opts:
            # seed a dedicated generator so the shuffle is reproducible
            seed = opts[b'random.seed']
            r = random.Random(seed)
            func = functools.partial(func, gen=r)
        ctxs.sort(key=func, reverse=reverse)
    return baseset([c.rev() for c in ctxs])
2503 2503
2504 2504
@predicate(b'subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _(b'subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _(b"subrepo requires a pattern"))

    # subrepo state changes are tracked through the .hgsubstate file
    m = matchmod.exact([b'.hgsubstate'])

    def submatches(names):
        # yield the subrepo names matching the user-supplied pattern
        k, p, m = stringutil.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status restricted to .hgsubstate: did subrepo state change at all?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare substate on both sides to find which subrepo changed
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=(b'<subrepo %r>', pat))
2548 2548
2549 2549
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    unficl = repo.unfiltered().changelog
    nodes = f(unficl.node(r) for r in s)
    revs = {unficl.index.get_rev(n) for n in nodes}
    # drop nodes unknown to the (unfiltered) changelog
    revs.discard(None)
    return smartset.baseset(revs - repo.changelog.filteredrevs)
2563 2563
2564 2564
@predicate(b'successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given set themselves.
    (EXPERIMENTAL)"""
    s = getset(repo, fullreposet(repo), x)

    def allsucc(nodes):
        # every transitive successor recorded in the obsstore
        return obsutil.allsuccessors(repo.obsstore, nodes)

    return subset & _mapbynodefunc(repo, s, allsucc)
2573 2573
2574 2574
def _substringmatcher(pattern, casesensitive=True):
    # build a matcher, then downgrade exact literal matching to substring
    # matching (that is what "user('joe')" & friends expect)
    kind, pattern, matcher = stringutil.stringmatcher(
        pattern, casesensitive=casesensitive
    )
    if kind == b'literal':
        if casesensitive:
            matcher = lambda s: pattern in s
        else:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
    return kind, pattern, matcher
2586 2586
2587 2587
@predicate(b'tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _(b"tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(
            args[0],
            # i18n: "tag" is a keyword
            _(b'the argument to tag must be a string'),
        )
        kind, pattern, matcher = stringutil.stringmatcher(pattern)
        if kind == b'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(
                    _(b"tag '%s' does not exist") % pattern
                )
            s = {repo[tn].rev()}
        else:
            # glob/regex pattern: scan every tag name
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        # no argument: all tagged revisions, excluding the implicit 'tip'
        s = {cl.rev(n) for t, n in repo.tagslist() if t != b'tip'}
    return subset & s
2618 2618
2619 2619
@predicate(b'tagged', safe=True)
def tagged(repo, subset, x):
    # alias of tag(); kept for backwards compatibility
    return tag(repo, subset, x)
2623 2623
2624 2624
@predicate(b'orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)"""
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _(b"orphan takes no arguments"))
    # the obsstore keeps a pre-computed set of orphan revisions
    return subset & obsmod.getrevs(repo, b'orphan')
2632 2632
2633 2633
@predicate(b'unstable()', safe=True)
def unstable(repo, subset, x):
    """Changesets with instabilities. (EXPERIMENTAL)"""
    # wrap the message in _() for translation, matching every sibling
    # predicate (the bare bytes here were an i18n inconsistency)
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _(b'unstable takes no arguments'))
    # union of all known instability categories
    _unstable = set()
    _unstable.update(obsmod.getrevs(repo, b'orphan'))
    _unstable.update(obsmod.getrevs(repo, b'phasedivergent'))
    _unstable.update(obsmod.getrevs(repo, b'contentdivergent'))
    return subset & baseset(_unstable)
2644 2644
2645 2645
@predicate(b'user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # "user" is an alias of "author"; delegate entirely
    return author(repo, subset, x)
2654 2654
2655 2655
@predicate(b'wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _(b"wdir takes no arguments"))
    # the virtual wdir revision is always part of a fullreposet
    if isinstance(subset, fullreposet) or wdirrev in subset:
        return baseset([wdirrev])
    return baseset()
2664 2664
2665 2665
def _orderedlist(repo, subset, x):
    # (internal) expand a '\0'-separated list of revision symbols,
    # preserving the order they were given in
    s = getstring(x, b"internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split(b'\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if (b'%d' % r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # not a plain integer: resolve the symbol (tag, bookmark, ...)
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            # note: parses as `r in subset or (r in _virtualrevs and ...)`
            # -- virtual revs are only accepted against a fullreposet
            if (
                r in subset
                or r in _virtualrevs
                and isinstance(subset, fullreposet)
            ):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2696 2696
2697 2697
# for internal use
@predicate(b'_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path: evaluate against the whole repo, then intersect so
        # the subset's order is preserved
        return subset & _orderedlist(repo, fullreposet(repo), x)
    return _orderedlist(repo, subset, x)
2706 2706
2707 2707
def _orderedintlist(repo, subset, x):
    """Resolve a '\\0'-separated list of integer revisions, in input order."""
    spec = getstring(x, b"internal error")
    if not spec:
        return baseset()
    # keep the incoming order, dropping revisions outside the subset
    wanted = [int(piece) for piece in spec.split(b'\0')]
    return baseset([rev for rev in wanted if rev in subset])
2715 2715
2716 2716
# for internal use
@predicate(b'_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
2725 2725
2726 2726
def _orderedhexlist(repo, subset, x):
    """Resolve a '\\0'-separated list of hex nodes, in input order."""
    spec = getstring(x, b"internal error")
    if not spec:
        return baseset()
    # map each hex node to its revision number, keeping the input order
    # and dropping revisions outside the subset
    torev = repo.changelog.rev
    wanted = [torev(bin(piece)) for piece in spec.split(b'\0')]
    return baseset([rev for rev in wanted if rev in subset])
2735 2735
2736 2736
# for internal use
@predicate(b'_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
2745 2745
2746 2746
# map each parse-tree node type to the function that evaluates it
# (dispatched by the tree walker; every value has the evaluation signature
# used above, e.g. fn(repo, subset, ...))
methods = {
    b"range": rangeset,
    b"rangeall": rangeall,
    b"rangepre": rangepre,
    b"rangepost": rangepost,
    b"dagrange": dagrange,
    b"string": stringset,
    b"symbol": stringset,
    b"and": andset,
    b"andsmally": andsmallyset,
    b"or": orset,
    b"not": notset,
    b"difference": differenceset,
    b"relation": relationset,
    b"relsubscript": relsubscriptset,
    b"subscript": subscriptset,
    b"list": listset,
    b"keyvalue": keyvaluepair,
    b"func": func,
    b"ancestor": ancestorspec,
    b"parent": parentspec,
    b"parentpost": parentpost,
    b"smartset": rawsmartset,
}
2771 2771
# relation names and their evaluators; b"g" is a shorthand for
# b"generations" (both map to the same function)
relations = {
    b"g": generationsrel,
    b"generations": generationsrel,
}

# same relation names when used with a subscript
subscriptrelations = {
    b"g": generationssubrel,
    b"generations": generationssubrel,
}
2781 2781
2782 2782
def lookupfn(repo):
    """Return a callable telling whether a symbol is a revision of ``repo``."""

    def fn(symbol):
        try:
            return scmutil.isrevsymbol(repo, symbol)
        except error.AmbiguousPrefixLookupError:
            # surface the ambiguity as a user input error
            msg = b'ambiguous revision identifier: %s' % symbol
            raise error.InputError(msg)

    return fn
2793 2793
2794 2794
def match(ui, spec, lookup=None):
    """Create a matcher for a single revision spec

    Thin convenience wrapper around matchany(); see it for the meaning of
    the ``ui`` and ``lookup`` arguments.
    """
    return matchany(ui, [spec], lookup=lookup)
2798 2798
2799 2799
def matchany(ui, specs, lookup=None, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If lookup function is not None, the parser will first attempt to handle
    old-style ranges, which may contain operator characters.

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over [revsetalias] config section.
    """
    if not specs:
        # Accept (and ignore) the same optional ``order`` argument as the
        # matcher returned by makematcher(), so callers can treat both
        # matcher flavors uniformly.
        def mfunc(repo, subset=None, order=None):
            return baseset()

        return mfunc
    if not all(specs):
        raise error.ParseError(_(b"empty query"))
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        # multiple specs are combined under a single top-level 'or'
        tree = (
            b'or',
            (b'list',) + tuple(revsetlang.parse(s, lookup) for s in specs),
        )

    # [revsetalias] config entries first, then local aliases (applied last,
    # they take precedence during expansion)
    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems(b'revsetalias'))
        warn = ui.warn
    if localalias:
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    return makematcher(tree)
2839 2839
2840 2840
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""

    def mfunc(repo, subset=None, order=None):
        if order is None:
            # bare 'x' defines its own order; 'subset & x' follows the
            # subset's order
            order = defineorder if subset is None else followorder
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree, order)

    return mfunc
2855 2855
2856 2856
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj"""
    table = registrarobj._table
    for name in table:
        func = table[name]
        symbols[name] = func
        # remember which predicates are flagged safe
        if func._safe:
            safesymbols.add(name)
2863 2863
2864 2864
# load built-in predicates explicitly to setup safesymbols; no ui or
# extension is involved at import time, hence the None arguments
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now