context: also return ancestor's line range in blockancestors
Denis Laxalde
r31076:0e07855e default
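
With this change, blockancestors() yields (filectx, linerange) pairs rather
than bare filectx objects, so callers also learn which line range of each
ancestor corresponds to the block being followed. A minimal sketch of the
new calling convention (the repository path and file name below are made-up
placeholders, not part of this commit):

    # illustration only: follow lines 1-10 of a tracked file back in time
    from mercurial import hg, ui as uimod
    from mercurial.context import blockancestors

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # assumed local repo
    fctx = repo['tip']['foo.py']                       # assumed tracked file
    for actx, (fromline, toline) in blockancestors(fctx, 1, 10):
        # before this commit, the loop variable was just `actx`
        print('%s: lines %d-%d' % (actx, fromline, toline))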
@@ -1,2116 +1,2116 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 newnodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 repoview,
37 37 revlog,
38 38 scmutil,
39 39 subrepo,
40 40 util,
41 41 )
42 42
43 43 propertycache = util.propertycache
44 44
45 45 nonascii = re.compile(r'[^\x21-\x7f]').search
46 46
47 47 class basectx(object):
48 48 """A basectx object represents the common logic for its children:
49 49 changectx: read-only context that is already present in the repo,
50 50 workingctx: a context that represents the working directory and can
51 51 be committed,
52 52 memctx: a context that represents changes in-memory and can also
53 53 be committed."""
54 54 def __new__(cls, repo, changeid='', *args, **kwargs):
55 55 if isinstance(changeid, basectx):
56 56 return changeid
57 57
58 58 o = super(basectx, cls).__new__(cls)
59 59
60 60 o._repo = repo
61 61 o._rev = nullrev
62 62 o._node = nullid
63 63
64 64 return o
65 65
66 66 def __str__(self):
67 67 return short(self.node())
68 68
69 69 def __int__(self):
70 70 return self.rev()
71 71
72 72 def __repr__(self):
73 73 return "<%s %s>" % (type(self).__name__, str(self))
74 74
75 75 def __eq__(self, other):
76 76 try:
77 77 return type(self) == type(other) and self._rev == other._rev
78 78 except AttributeError:
79 79 return False
80 80
81 81 def __ne__(self, other):
82 82 return not (self == other)
83 83
84 84 def __contains__(self, key):
85 85 return key in self._manifest
86 86
87 87 def __getitem__(self, key):
88 88 return self.filectx(key)
89 89
90 90 def __iter__(self):
91 91 return iter(self._manifest)
92 92
93 93 def _manifestmatches(self, match, s):
94 94 """generate a new manifest filtered by the match argument
95 95
96 96 This method is for internal use only and mainly exists to provide an
97 97 object oriented way for other contexts to customize the manifest
98 98 generation.
99 99 """
100 100 return self.manifest().matches(match)
101 101
102 102 def _matchstatus(self, other, match):
103 103 """return match.always if match is none
104 104
105 105 This internal method provides a way for child objects to override the
106 106 match operator.
107 107 """
108 108 return match or matchmod.always(self._repo.root, self._repo.getcwd())
109 109
110 110 def _buildstatus(self, other, s, match, listignored, listclean,
111 111 listunknown):
112 112 """build a status with respect to another context"""
113 113 # Load earliest manifest first for caching reasons. More specifically,
114 114 # if you have revisions 1000 and 1001, 1001 is probably stored as a
115 115 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
116 116 # 1000 and cache it so that when you read 1001, we just need to apply a
117 117 # delta to what's in the cache. So that's one full reconstruction + one
118 118 # delta application.
119 119 if self.rev() is not None and self.rev() < other.rev():
120 120 self.manifest()
121 121 mf1 = other._manifestmatches(match, s)
122 122 mf2 = self._manifestmatches(match, s)
123 123
124 124 modified, added = [], []
125 125 removed = []
126 126 clean = []
127 127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 128 deletedset = set(deleted)
129 129 d = mf1.diff(mf2, clean=listclean)
130 130 for fn, value in d.iteritems():
131 131 if fn in deletedset:
132 132 continue
133 133 if value is None:
134 134 clean.append(fn)
135 135 continue
136 136 (node1, flag1), (node2, flag2) = value
137 137 if node1 is None:
138 138 added.append(fn)
139 139 elif node2 is None:
140 140 removed.append(fn)
141 141 elif flag1 != flag2:
142 142 modified.append(fn)
143 143 elif node2 != newnodeid:
144 144 # When comparing files between two commits, we save time by
145 145 # not comparing the file contents when the nodeids differ.
146 146 # Note that this means we incorrectly report a reverted change
147 147 # to a file as a modification.
148 148 modified.append(fn)
149 149 elif self[fn].cmp(other[fn]):
150 150 modified.append(fn)
151 151 else:
152 152 clean.append(fn)
153 153
154 154 if removed:
155 155 # need to filter files if they are already reported as removed
156 156 unknown = [fn for fn in unknown if fn not in mf1]
157 157 ignored = [fn for fn in ignored if fn not in mf1]
158 158 # if they're deleted, don't report them as removed
159 159 removed = [fn for fn in removed if fn not in deletedset]
160 160
161 161 return scmutil.status(modified, added, removed, deleted, unknown,
162 162 ignored, clean)
163 163
164 164 @propertycache
165 165 def substate(self):
166 166 return subrepo.state(self, self._repo.ui)
167 167
168 168 def subrev(self, subpath):
169 169 return self.substate[subpath][1]
170 170
171 171 def rev(self):
172 172 return self._rev
173 173 def node(self):
174 174 return self._node
175 175 def hex(self):
176 176 return hex(self.node())
177 177 def manifest(self):
178 178 return self._manifest
179 179 def manifestctx(self):
180 180 return self._manifestctx
181 181 def repo(self):
182 182 return self._repo
183 183 def phasestr(self):
184 184 return phases.phasenames[self.phase()]
185 185 def mutable(self):
186 186 return self.phase() > phases.public
187 187
188 188 def getfileset(self, expr):
189 189 return fileset.getfileset(self, expr)
190 190
191 191 def obsolete(self):
192 192 """True if the changeset is obsolete"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 194
195 195 def extinct(self):
196 196 """True if the changeset is extinct"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 198
199 199 def unstable(self):
200 200 """True if the changeset is not obsolete but it's ancestor are"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 202
203 203 def bumped(self):
204 204 """True if the changeset try to be a successor of a public changeset
205 205
206 206 Only non-public and non-obsolete changesets may be bumped.
207 207 """
208 208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 209
210 210 def divergent(self):
211 211 """Is a successors of a changeset with multiple possible successors set
212 212
213 213 Only non-public and non-obsolete changesets may be divergent.
214 214 """
215 215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 216
217 217 def troubled(self):
218 218 """True if the changeset is either unstable, bumped or divergent"""
219 219 return self.unstable() or self.bumped() or self.divergent()
220 220
221 221 def troubles(self):
222 222 """return the list of troubles affecting this changesets.
223 223
224 224 Troubles are returned as strings. possible values are:
225 225 - unstable,
226 226 - bumped,
227 227 - divergent.
228 228 """
229 229 troubles = []
230 230 if self.unstable():
231 231 troubles.append('unstable')
232 232 if self.bumped():
233 233 troubles.append('bumped')
234 234 if self.divergent():
235 235 troubles.append('divergent')
236 236 return troubles
237 237
238 238 def parents(self):
239 239 """return contexts for each parent changeset"""
240 240 return self._parents
241 241
242 242 def p1(self):
243 243 return self._parents[0]
244 244
245 245 def p2(self):
246 246 parents = self._parents
247 247 if len(parents) == 2:
248 248 return parents[1]
249 249 return changectx(self._repo, nullrev)
250 250
251 251 def _fileinfo(self, path):
252 252 if '_manifest' in self.__dict__:
253 253 try:
254 254 return self._manifest[path], self._manifest.flags(path)
255 255 except KeyError:
256 256 raise error.ManifestLookupError(self._node, path,
257 257 _('not found in manifest'))
258 258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 259 if path in self._manifestdelta:
260 260 return (self._manifestdelta[path],
261 261 self._manifestdelta.flags(path))
262 262 mfl = self._repo.manifestlog
263 263 try:
264 264 node, flag = mfl[self._changeset.manifest].find(path)
265 265 except KeyError:
266 266 raise error.ManifestLookupError(self._node, path,
267 267 _('not found in manifest'))
268 268
269 269 return node, flag
270 270
271 271 def filenode(self, path):
272 272 return self._fileinfo(path)[0]
273 273
274 274 def flags(self, path):
275 275 try:
276 276 return self._fileinfo(path)[1]
277 277 except error.LookupError:
278 278 return ''
279 279
280 280 def sub(self, path, allowcreate=True):
281 281 '''return a subrepo for the stored revision of path, never wdir()'''
282 282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283 283
284 284 def nullsub(self, path, pctx):
285 285 return subrepo.nullsubrepo(self, path, pctx)
286 286
287 287 def workingsub(self, path):
288 288 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 289 context.
290 290 '''
291 291 return subrepo.subrepo(self, path, allowwdir=True)
292 292
293 293 def match(self, pats=[], include=None, exclude=None, default='glob',
294 294 listsubrepos=False, badfn=None):
295 295 r = self._repo
296 296 return matchmod.match(r.root, r.getcwd(), pats,
297 297 include, exclude, default,
298 298 auditor=r.nofsauditor, ctx=self,
299 299 listsubrepos=listsubrepos, badfn=badfn)
300 300
301 301 def diff(self, ctx2=None, match=None, **opts):
302 302 """Returns a diff generator for the given contexts and matcher"""
303 303 if ctx2 is None:
304 304 ctx2 = self.p1()
305 305 if ctx2 is not None:
306 306 ctx2 = self._repo[ctx2]
307 307 diffopts = patch.diffopts(self._repo.ui, opts)
308 308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309 309
310 310 def dirs(self):
311 311 return self._manifest.dirs()
312 312
313 313 def hasdir(self, dir):
314 314 return self._manifest.hasdir(dir)
315 315
316 316 def dirty(self, missing=False, merge=True, branch=True):
317 317 return False
318 318
319 319 def status(self, other=None, match=None, listignored=False,
320 320 listclean=False, listunknown=False, listsubrepos=False):
321 321 """return status of files between two nodes or node and working
322 322 directory.
323 323
324 324 If other is None, compare this node with working directory.
325 325
326 326 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 327 """
328 328
329 329 ctx1 = self
330 330 ctx2 = self._repo[other]
331 331
332 332 # This next code block is, admittedly, fragile logic that tests for
333 333 # reversing the contexts and wouldn't need to exist if it weren't for
334 334 # the fast (and common) code path of comparing the working directory
335 335 # with its first parent.
336 336 #
337 337 # What we're aiming for here is the ability to call:
338 338 #
339 339 # workingctx.status(parentctx)
340 340 #
341 341 # If we always built the manifest for each context and compared those,
342 342 # then we'd be done. But the special case of the above call means we
343 343 # just copy the manifest of the parent.
344 344 reversed = False
345 345 if (not isinstance(ctx1, changectx)
346 346 and isinstance(ctx2, changectx)):
347 347 reversed = True
348 348 ctx1, ctx2 = ctx2, ctx1
349 349
350 350 match = ctx2._matchstatus(ctx1, match)
351 351 r = scmutil.status([], [], [], [], [], [], [])
352 352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 353 listunknown)
354 354
355 355 if reversed:
356 356 # Reverse added and removed. Clear deleted, unknown and ignored as
357 357 # these make no sense to reverse.
358 358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 359 r.clean)
360 360
361 361 if listsubrepos:
362 362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 363 try:
364 364 rev2 = ctx2.subrev(subpath)
365 365 except KeyError:
366 366 # A subrepo that existed in node1 was deleted between
367 367 # node1 and node2 (inclusive). Thus, ctx2's substate
368 368 # won't contain that subpath. The best we can do is ignore it.
369 369 rev2 = None
370 370 submatch = matchmod.subdirmatcher(subpath, match)
371 371 s = sub.status(rev2, match=submatch, ignored=listignored,
372 372 clean=listclean, unknown=listunknown,
373 373 listsubrepos=True)
374 374 for rfiles, sfiles in zip(r, s):
375 375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376 376
377 377 for l in r:
378 378 l.sort()
379 379
380 380 return r
381 381
382 382
383 383 def makememctx(repo, parents, text, user, date, branch, files, store,
384 384 editor=None, extra=None):
385 385 def getfilectx(repo, memctx, path):
386 386 data, mode, copied = store.getfile(path)
387 387 if data is None:
388 388 return None
389 389 islink, isexec = mode
390 390 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
391 391 copied=copied, memctx=memctx)
392 392 if extra is None:
393 393 extra = {}
394 394 if branch:
395 395 extra['branch'] = encoding.fromlocal(branch)
396 396 ctx = memctx(repo, parents, text, files, getfilectx, user,
397 397 date, extra, editor)
398 398 return ctx
399 399
400 400 class changectx(basectx):
401 401 """A changecontext object makes access to data related to a particular
402 402 changeset convenient. It represents a read-only context already present in
403 403 the repo."""
404 404 def __init__(self, repo, changeid=''):
405 405 """changeid is a revision number, node, or tag"""
406 406
407 407 # since basectx.__new__ already took care of copying the object, we
408 408 # don't need to do anything in __init__, so we just exit here
409 409 if isinstance(changeid, basectx):
410 410 return
411 411
412 412 if changeid == '':
413 413 changeid = '.'
414 414 self._repo = repo
415 415
416 416 try:
417 417 if isinstance(changeid, int):
418 418 self._node = repo.changelog.node(changeid)
419 419 self._rev = changeid
420 420 return
421 421 if isinstance(changeid, long):
422 422 changeid = str(changeid)
423 423 if changeid == 'null':
424 424 self._node = nullid
425 425 self._rev = nullrev
426 426 return
427 427 if changeid == 'tip':
428 428 self._node = repo.changelog.tip()
429 429 self._rev = repo.changelog.rev(self._node)
430 430 return
431 431 if changeid == '.' or changeid == repo.dirstate.p1():
432 432 # this is a hack to delay/avoid loading obsmarkers
433 433 # when we know that '.' won't be hidden
434 434 self._node = repo.dirstate.p1()
435 435 self._rev = repo.unfiltered().changelog.rev(self._node)
436 436 return
437 437 if len(changeid) == 20:
438 438 try:
439 439 self._node = changeid
440 440 self._rev = repo.changelog.rev(changeid)
441 441 return
442 442 except error.FilteredRepoLookupError:
443 443 raise
444 444 except LookupError:
445 445 pass
446 446
447 447 try:
448 448 r = int(changeid)
449 449 if str(r) != changeid:
450 450 raise ValueError
451 451 l = len(repo.changelog)
452 452 if r < 0:
453 453 r += l
454 454 if r < 0 or r >= l:
455 455 raise ValueError
456 456 self._rev = r
457 457 self._node = repo.changelog.node(r)
458 458 return
459 459 except error.FilteredIndexError:
460 460 raise
461 461 except (ValueError, OverflowError, IndexError):
462 462 pass
463 463
464 464 if len(changeid) == 40:
465 465 try:
466 466 self._node = bin(changeid)
467 467 self._rev = repo.changelog.rev(self._node)
468 468 return
469 469 except error.FilteredLookupError:
470 470 raise
471 471 except (TypeError, LookupError):
472 472 pass
473 473
474 474 # lookup bookmarks through the name interface
475 475 try:
476 476 self._node = repo.names.singlenode(repo, changeid)
477 477 self._rev = repo.changelog.rev(self._node)
478 478 return
479 479 except KeyError:
480 480 pass
481 481 except error.FilteredRepoLookupError:
482 482 raise
483 483 except error.RepoLookupError:
484 484 pass
485 485
486 486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 487 if self._node is not None:
488 488 self._rev = repo.changelog.rev(self._node)
489 489 return
490 490
491 491 # lookup failed
492 492 # check if it might have come from damaged dirstate
493 493 #
494 494 # XXX we could avoid the unfiltered if we had a recognizable
495 495 # exception for filtered changeset access
496 496 if changeid in repo.unfiltered().dirstate.parents():
497 497 msg = _("working directory has unknown parent '%s'!")
498 498 raise error.Abort(msg % short(changeid))
499 499 try:
500 500 if len(changeid) == 20 and nonascii(changeid):
501 501 changeid = hex(changeid)
502 502 except TypeError:
503 503 pass
504 504 except (error.FilteredIndexError, error.FilteredLookupError,
505 505 error.FilteredRepoLookupError):
506 506 if repo.filtername.startswith('visible'):
507 507 msg = _("hidden revision '%s'") % changeid
508 508 hint = _('use --hidden to access hidden revisions')
509 509 raise error.FilteredRepoLookupError(msg, hint=hint)
510 510 msg = _("filtered revision '%s' (not in '%s' subset)")
511 511 msg %= (changeid, repo.filtername)
512 512 raise error.FilteredRepoLookupError(msg)
513 513 except IndexError:
514 514 pass
515 515 raise error.RepoLookupError(
516 516 _("unknown revision '%s'") % changeid)
517 517
518 518 def __hash__(self):
519 519 try:
520 520 return hash(self._rev)
521 521 except AttributeError:
522 522 return id(self)
523 523
524 524 def __nonzero__(self):
525 525 return self._rev != nullrev
526 526
527 527 @propertycache
528 528 def _changeset(self):
529 529 return self._repo.changelog.changelogrevision(self.rev())
530 530
531 531 @propertycache
532 532 def _manifest(self):
533 533 return self._manifestctx.read()
534 534
535 535 @propertycache
536 536 def _manifestctx(self):
537 537 return self._repo.manifestlog[self._changeset.manifest]
538 538
539 539 @propertycache
540 540 def _manifestdelta(self):
541 541 return self._manifestctx.readdelta()
542 542
543 543 @propertycache
544 544 def _parents(self):
545 545 repo = self._repo
546 546 p1, p2 = repo.changelog.parentrevs(self._rev)
547 547 if p2 == nullrev:
548 548 return [changectx(repo, p1)]
549 549 return [changectx(repo, p1), changectx(repo, p2)]
550 550
551 551 def changeset(self):
552 552 c = self._changeset
553 553 return (
554 554 c.manifest,
555 555 c.user,
556 556 c.date,
557 557 c.files,
558 558 c.description,
559 559 c.extra,
560 560 )
561 561 def manifestnode(self):
562 562 return self._changeset.manifest
563 563
564 564 def user(self):
565 565 return self._changeset.user
566 566 def date(self):
567 567 return self._changeset.date
568 568 def files(self):
569 569 return self._changeset.files
570 570 def description(self):
571 571 return self._changeset.description
572 572 def branch(self):
573 573 return encoding.tolocal(self._changeset.extra.get("branch"))
574 574 def closesbranch(self):
575 575 return 'close' in self._changeset.extra
576 576 def extra(self):
577 577 return self._changeset.extra
578 578 def tags(self):
579 579 return self._repo.nodetags(self._node)
580 580 def bookmarks(self):
581 581 return self._repo.nodebookmarks(self._node)
582 582 def phase(self):
583 583 return self._repo._phasecache.phase(self._repo, self._rev)
584 584 def hidden(self):
585 585 return self._rev in repoview.filterrevs(self._repo, 'visible')
586 586
587 587 def children(self):
588 588 """return contexts for each child changeset"""
589 589 c = self._repo.changelog.children(self._node)
590 590 return [changectx(self._repo, x) for x in c]
591 591
592 592 def ancestors(self):
593 593 for a in self._repo.changelog.ancestors([self._rev]):
594 594 yield changectx(self._repo, a)
595 595
596 596 def descendants(self):
597 597 for d in self._repo.changelog.descendants([self._rev]):
598 598 yield changectx(self._repo, d)
599 599
600 600 def filectx(self, path, fileid=None, filelog=None):
601 601 """get a file context from this changeset"""
602 602 if fileid is None:
603 603 fileid = self.filenode(path)
604 604 return filectx(self._repo, path, fileid=fileid,
605 605 changectx=self, filelog=filelog)
606 606
607 607 def ancestor(self, c2, warn=False):
608 608 """return the "best" ancestor context of self and c2
609 609
610 610 If there are multiple candidates, it will show a message and check
611 611 merge.preferancestor configuration before falling back to the
612 612 revlog ancestor."""
613 613 # deal with workingctxs
614 614 n2 = c2._node
615 615 if n2 is None:
616 616 n2 = c2._parents[0]._node
617 617 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
618 618 if not cahs:
619 619 anc = nullid
620 620 elif len(cahs) == 1:
621 621 anc = cahs[0]
622 622 else:
623 623 # experimental config: merge.preferancestor
624 624 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
625 625 try:
626 626 ctx = changectx(self._repo, r)
627 627 except error.RepoLookupError:
628 628 continue
629 629 anc = ctx.node()
630 630 if anc in cahs:
631 631 break
632 632 else:
633 633 anc = self._repo.changelog.ancestor(self._node, n2)
634 634 if warn:
635 635 self._repo.ui.status(
636 636 (_("note: using %s as ancestor of %s and %s\n") %
637 637 (short(anc), short(self._node), short(n2))) +
638 638 ''.join(_(" alternatively, use --config "
639 639 "merge.preferancestor=%s\n") %
640 640 short(n) for n in sorted(cahs) if n != anc))
641 641 return changectx(self._repo, anc)
642 642
643 643 def descendant(self, other):
644 644 """True if other is descendant of this changeset"""
645 645 return self._repo.changelog.descendant(self._rev, other._rev)
646 646
647 647 def walk(self, match):
648 648 '''Generates matching file names.'''
649 649
650 650 # Wrap match.bad method to have message with nodeid
651 651 def bad(fn, msg):
652 652 # The manifest doesn't know about subrepos, so don't complain about
653 653 # paths into valid subrepos.
654 654 if any(fn == s or fn.startswith(s + '/')
655 655 for s in self.substate):
656 656 return
657 657 match.bad(fn, _('no such file in rev %s') % self)
658 658
659 659 m = matchmod.badmatch(match, bad)
660 660 return self._manifest.walk(m)
661 661
662 662 def matches(self, match):
663 663 return self.walk(match)
664 664
665 665 class basefilectx(object):
666 666 """A filecontext object represents the common logic for its children:
667 667 filectx: read-only access to a filerevision that is already present
668 668 in the repo,
669 669 workingfilectx: a filecontext that represents files from the working
670 670 directory,
671 671 memfilectx: a filecontext that represents files in-memory."""
672 672 def __new__(cls, repo, path, *args, **kwargs):
673 673 return super(basefilectx, cls).__new__(cls)
674 674
675 675 @propertycache
676 676 def _filelog(self):
677 677 return self._repo.file(self._path)
678 678
679 679 @propertycache
680 680 def _changeid(self):
681 681 if '_changeid' in self.__dict__:
682 682 return self._changeid
683 683 elif '_changectx' in self.__dict__:
684 684 return self._changectx.rev()
685 685 elif '_descendantrev' in self.__dict__:
686 686 # this file context was created from a revision with a known
687 687 # descendant, we can (lazily) correct for linkrev aliases
688 688 return self._adjustlinkrev(self._descendantrev)
689 689 else:
690 690 return self._filelog.linkrev(self._filerev)
691 691
692 692 @propertycache
693 693 def _filenode(self):
694 694 if '_fileid' in self.__dict__:
695 695 return self._filelog.lookup(self._fileid)
696 696 else:
697 697 return self._changectx.filenode(self._path)
698 698
699 699 @propertycache
700 700 def _filerev(self):
701 701 return self._filelog.rev(self._filenode)
702 702
703 703 @propertycache
704 704 def _repopath(self):
705 705 return self._path
706 706
707 707 def __nonzero__(self):
708 708 try:
709 709 self._filenode
710 710 return True
711 711 except error.LookupError:
712 712 # file is missing
713 713 return False
714 714
715 715 def __str__(self):
716 716 try:
717 717 return "%s@%s" % (self.path(), self._changectx)
718 718 except error.LookupError:
719 719 return "%s@???" % self.path()
720 720
721 721 def __repr__(self):
722 722 return "<%s %s>" % (type(self).__name__, str(self))
723 723
724 724 def __hash__(self):
725 725 try:
726 726 return hash((self._path, self._filenode))
727 727 except AttributeError:
728 728 return id(self)
729 729
730 730 def __eq__(self, other):
731 731 try:
732 732 return (type(self) == type(other) and self._path == other._path
733 733 and self._filenode == other._filenode)
734 734 except AttributeError:
735 735 return False
736 736
737 737 def __ne__(self, other):
738 738 return not (self == other)
739 739
740 740 def filerev(self):
741 741 return self._filerev
742 742 def filenode(self):
743 743 return self._filenode
744 744 def flags(self):
745 745 return self._changectx.flags(self._path)
746 746 def filelog(self):
747 747 return self._filelog
748 748 def rev(self):
749 749 return self._changeid
750 750 def linkrev(self):
751 751 return self._filelog.linkrev(self._filerev)
752 752 def node(self):
753 753 return self._changectx.node()
754 754 def hex(self):
755 755 return self._changectx.hex()
756 756 def user(self):
757 757 return self._changectx.user()
758 758 def date(self):
759 759 return self._changectx.date()
760 760 def files(self):
761 761 return self._changectx.files()
762 762 def description(self):
763 763 return self._changectx.description()
764 764 def branch(self):
765 765 return self._changectx.branch()
766 766 def extra(self):
767 767 return self._changectx.extra()
768 768 def phase(self):
769 769 return self._changectx.phase()
770 770 def phasestr(self):
771 771 return self._changectx.phasestr()
772 772 def manifest(self):
773 773 return self._changectx.manifest()
774 774 def changectx(self):
775 775 return self._changectx
776 776 def repo(self):
777 777 return self._repo
778 778
779 779 def path(self):
780 780 return self._path
781 781
782 782 def isbinary(self):
783 783 try:
784 784 return util.binary(self.data())
785 785 except IOError:
786 786 return False
787 787 def isexec(self):
788 788 return 'x' in self.flags()
789 789 def islink(self):
790 790 return 'l' in self.flags()
791 791
792 792 def isabsent(self):
793 793 """whether this filectx represents a file not in self._changectx
794 794
795 795 This is mainly for merge code to detect change/delete conflicts. This is
796 796 expected to be True for all subclasses of basectx."""
797 797 return False
798 798
799 799 _customcmp = False
800 800 def cmp(self, fctx):
801 801 """compare with other file context
802 802
803 803 returns True if different than fctx.
804 804 """
805 805 if fctx._customcmp:
806 806 return fctx.cmp(self)
807 807
808 808 if (fctx._filenode is None
809 809 and (self._repo._encodefilterpats
810 810 # if file data starts with '\1\n', empty metadata block is
811 811 # prepended, which adds 4 bytes to filelog.size().
812 812 or self.size() - 4 == fctx.size())
813 813 or self.size() == fctx.size()):
814 814 return self._filelog.cmp(self._filenode, fctx.data())
815 815
816 816 return True
817 817
818 818 def _adjustlinkrev(self, srcrev, inclusive=False):
819 819 """return the first ancestor of <srcrev> introducing <fnode>
820 820
821 821 If the linkrev of the file revision does not point to an ancestor of
822 822 srcrev, we'll walk down the ancestors until we find one introducing
823 823 this file revision.
824 824
825 825 :srcrev: the changeset revision we search ancestors from
826 826 :inclusive: if true, the src revision will also be checked
827 827 """
828 828 repo = self._repo
829 829 cl = repo.unfiltered().changelog
830 830 mfl = repo.manifestlog
831 831 # fetch the linkrev
832 832 lkr = self.linkrev()
833 833 # hack to reuse ancestor computation when searching for renames
834 834 memberanc = getattr(self, '_ancestrycontext', None)
835 835 iteranc = None
836 836 if srcrev is None:
837 837 # wctx case, used by workingfilectx during mergecopy
838 838 revs = [p.rev() for p in self._repo[None].parents()]
839 839 inclusive = True # we skipped the real (revless) source
840 840 else:
841 841 revs = [srcrev]
842 842 if memberanc is None:
843 843 memberanc = iteranc = cl.ancestors(revs, lkr,
844 844 inclusive=inclusive)
845 845 # check if this linkrev is an ancestor of srcrev
846 846 if lkr not in memberanc:
847 847 if iteranc is None:
848 848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 849 fnode = self._filenode
850 850 path = self._path
851 851 for a in iteranc:
852 852 ac = cl.read(a) # get changeset data (we avoid object creation)
853 853 if path in ac[3]: # checking the 'files' field.
854 854 # The file has been touched, check if the content is
855 855 # similar to the one we search for.
856 856 if fnode == mfl[ac[0]].readfast().get(path):
857 857 return a
858 858 # In theory, we should never get out of that loop without a result.
859 859 # But if the manifest uses a buggy file revision (not a child of the
860 860 # one it replaces) we could. Such a buggy situation will likely
861 861 # result in a crash somewhere else at some point.
862 862 return lkr
863 863
864 864 def introrev(self):
865 865 """return the rev of the changeset which introduced this file revision
866 866
867 867 This method is different from linkrev because it takes into account the
868 868 changeset the filectx was created from. It ensures the returned
869 869 revision is one of its ancestors. This prevents bugs from
870 870 'linkrev-shadowing' when a file revision is used by multiple
871 871 changesets.
872 872 """
873 873 lkr = self.linkrev()
874 874 attrs = vars(self)
875 875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
876 876 if noctx or self.rev() == lkr:
877 877 return self.linkrev()
878 878 return self._adjustlinkrev(self.rev(), inclusive=True)
879 879
880 880 def _parentfilectx(self, path, fileid, filelog):
881 881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 883 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 884 # If self is associated with a changeset (probably explicitly
885 885 # fed), ensure the created filectx is associated with a
886 886 # changeset that is an ancestor of self.changectx.
887 887 # This lets us later use _adjustlinkrev to get a correct link.
888 888 fctx._descendantrev = self.rev()
889 889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 890 elif '_descendantrev' in vars(self):
891 891 # Otherwise propagate _descendantrev if we have one associated.
892 892 fctx._descendantrev = self._descendantrev
893 893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 894 return fctx
895 895
896 896 def parents(self):
897 897 _path = self._path
898 898 fl = self._filelog
899 899 parents = self._filelog.parents(self._filenode)
900 900 pl = [(_path, node, fl) for node in parents if node != nullid]
901 901
902 902 r = fl.renamed(self._filenode)
903 903 if r:
904 904 # - In the simple rename case, both parents are nullid, pl is empty.
905 905 # - In case of merge, only one of the parents is nullid and should
906 906 # be replaced with the rename information. This parent is -always-
907 907 # the first one.
908 908 #
909 909 # As nullid parents have always been filtered out in the previous
910 910 # list comprehension, inserting at 0 will always result in replacing
911 911 # the first nullid parent with the rename information.
912 912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913 913
914 914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915 915
916 916 def p1(self):
917 917 return self.parents()[0]
918 918
919 919 def p2(self):
920 920 p = self.parents()
921 921 if len(p) == 2:
922 922 return p[1]
923 923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924 924
925 925 def annotate(self, follow=False, linenumber=False, diffopts=None):
926 926 '''returns a list of tuples of ((ctx, number), line) for each line
927 927 in the file, where ctx is the filectx of the node where
928 928 that line was last changed; if linenumber parameter is true, number is
929 929 the line number at the first appearance in the managed file, otherwise,
930 930 number has a fixed value of False.
931 931 '''
932 932
933 933 def lines(text):
934 934 if text.endswith("\n"):
935 935 return text.count("\n")
936 936 return text.count("\n") + int(bool(text))
937 937
938 938 if linenumber:
939 939 def decorate(text, rev):
940 940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
941 941 else:
942 942 def decorate(text, rev):
943 943 return ([(rev, False)] * lines(text), text)
944 944
945 945 def pair(parent, child):
946 946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
947 947 for (a1, a2, b1, b2), t in blocks:
948 948 # Changed blocks ('!') or blocks made only of blank lines ('~')
949 949 # belong to the child.
950 950 if t == '=':
951 951 child[0][b1:b2] = parent[0][a1:a2]
952 952 return child
953 953
954 954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
955 955
956 956 def parents(f):
957 957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
958 958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
959 959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
960 960 # isn't an ancestor of the srcrev.
961 961 f._changeid
962 962 pl = f.parents()
963 963
964 964 # Don't return renamed parents if we aren't following.
965 965 if not follow:
966 966 pl = [p for p in pl if p.path() == f.path()]
967 967
968 968 # renamed filectx won't have a filelog yet, so set it
969 969 # from the cache to save time
970 970 for p in pl:
971 971 if not '_filelog' in p.__dict__:
972 972 p._filelog = getlog(p.path())
973 973
974 974 return pl
975 975
976 976 # use linkrev to find the first changeset where self appeared
977 977 base = self
978 978 introrev = self.introrev()
979 979 if self.rev() != introrev:
980 980 base = self.filectx(self.filenode(), changeid=introrev)
981 981 if getattr(base, '_ancestrycontext', None) is None:
982 982 cl = self._repo.changelog
983 983 if introrev is None:
984 984 # wctx is not inclusive, but works because _ancestrycontext
985 985 # is used to test filelog revisions
986 986 ac = cl.ancestors([p.rev() for p in base.parents()],
987 987 inclusive=True)
988 988 else:
989 989 ac = cl.ancestors([introrev], inclusive=True)
990 990 base._ancestrycontext = ac
991 991
992 992 # This algorithm would prefer to be recursive, but Python is a
993 993 # bit recursion-hostile. Instead we do an iterative
994 994 # depth-first search.
995 995
996 996 # 1st DFS pre-calculates pcache and needed
997 997 visit = [base]
998 998 pcache = {}
999 999 needed = {base: 1}
1000 1000 while visit:
1001 1001 f = visit.pop()
1002 1002 if f in pcache:
1003 1003 continue
1004 1004 pl = parents(f)
1005 1005 pcache[f] = pl
1006 1006 for p in pl:
1007 1007 needed[p] = needed.get(p, 0) + 1
1008 1008 if p not in pcache:
1009 1009 visit.append(p)
1010 1010
1011 1011 # 2nd DFS does the actual annotate
1012 1012 visit[:] = [base]
1013 1013 hist = {}
1014 1014 while visit:
1015 1015 f = visit[-1]
1016 1016 if f in hist:
1017 1017 visit.pop()
1018 1018 continue
1019 1019
1020 1020 ready = True
1021 1021 pl = pcache[f]
1022 1022 for p in pl:
1023 1023 if p not in hist:
1024 1024 ready = False
1025 1025 visit.append(p)
1026 1026 if ready:
1027 1027 visit.pop()
1028 1028 curr = decorate(f.data(), f)
1029 1029 for p in pl:
1030 1030 curr = pair(hist[p], curr)
1031 1031 if needed[p] == 1:
1032 1032 del hist[p]
1033 1033 del needed[p]
1034 1034 else:
1035 1035 needed[p] -= 1
1036 1036
1037 1037 hist[f] = curr
1038 1038 del pcache[f]
1039 1039
1040 1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 1041
1042 1042 def ancestors(self, followfirst=False):
1043 1043 visit = {}
1044 1044 c = self
1045 1045 if followfirst:
1046 1046 cut = 1
1047 1047 else:
1048 1048 cut = None
1049 1049
1050 1050 while True:
1051 1051 for parent in c.parents()[:cut]:
1052 1052 visit[(parent.linkrev(), parent.filenode())] = parent
1053 1053 if not visit:
1054 1054 break
1055 1055 c = visit.pop(max(visit))
1056 1056 yield c
1057 1057
1058 1058 class filectx(basefilectx):
1059 1059 """A filecontext object makes access to data related to a particular
1060 1060 filerevision convenient."""
1061 1061 def __init__(self, repo, path, changeid=None, fileid=None,
1062 1062 filelog=None, changectx=None):
1063 1063 """changeid can be a changeset revision, node, or tag.
1064 1064 fileid can be a file revision or node."""
1065 1065 self._repo = repo
1066 1066 self._path = path
1067 1067
1068 1068 assert (changeid is not None
1069 1069 or fileid is not None
1070 1070 or changectx is not None), \
1071 1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1072 1072 % (changeid, fileid, changectx))
1073 1073
1074 1074 if filelog is not None:
1075 1075 self._filelog = filelog
1076 1076
1077 1077 if changeid is not None:
1078 1078 self._changeid = changeid
1079 1079 if changectx is not None:
1080 1080 self._changectx = changectx
1081 1081 if fileid is not None:
1082 1082 self._fileid = fileid
1083 1083
1084 1084 @propertycache
1085 1085 def _changectx(self):
1086 1086 try:
1087 1087 return changectx(self._repo, self._changeid)
1088 1088 except error.FilteredRepoLookupError:
1089 1089 # Linkrev may point to any revision in the repository. When the
1090 1090 # repository is filtered this may lead to `filectx` trying to build
1091 1091 # `changectx` for a filtered revision. In such a case we fall back
1092 1092 # to creating `changectx` on the unfiltered version of the repository.
1093 1093 # This fallback should not be an issue because `changectx` from
1094 1094 # `filectx` are not used in complex operations that care about
1095 1095 # filtering.
1096 1096 #
1097 1097 # This fallback is a cheap and dirty fix that prevents several
1098 1098 # crashes. It does not ensure the behavior is correct. However the
1099 1099 # behavior was not correct before filtering either and "incorrect
1100 1100 # behavior" is seen as better than "crash".
1101 1101 #
1102 1102 # Linkrevs have several serious troubles with filtering that are
1103 1103 # complicated to solve. Proper handling of the issue here should be
1104 1104 # considered when solving the linkrev issues is on the table.
1105 1105 return changectx(self._repo.unfiltered(), self._changeid)
1106 1106
1107 1107 def filectx(self, fileid, changeid=None):
1108 1108 '''opens an arbitrary revision of the file without
1109 1109 opening a new filelog'''
1110 1110 return filectx(self._repo, self._path, fileid=fileid,
1111 1111 filelog=self._filelog, changeid=changeid)
1112 1112
1113 1113 def rawdata(self):
1114 1114 return self._filelog.revision(self._filenode, raw=True)
1115 1115
1116 1116 def data(self):
1117 1117 try:
1118 1118 return self._filelog.read(self._filenode)
1119 1119 except error.CensoredNodeError:
1120 1120 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1121 1121 return ""
1122 1122 raise error.Abort(_("censored node: %s") % short(self._filenode),
1123 1123 hint=_("set censor.policy to ignore errors"))
1124 1124
1125 1125 def size(self):
1126 1126 return self._filelog.size(self._filerev)
1127 1127
1128 1128 def renamed(self):
1129 1129 """check if file was actually renamed in this changeset revision
1130 1130
1131 1131 If a rename is logged in the file revision, we report the copy for the
1132 1132 changeset only if the file revision's linkrev points back to the changeset
1133 1133 in question or both changeset parents contain different file revisions.
1134 1134 """
1135 1135
1136 1136 renamed = self._filelog.renamed(self._filenode)
1137 1137 if not renamed:
1138 1138 return renamed
1139 1139
1140 1140 if self.rev() == self.linkrev():
1141 1141 return renamed
1142 1142
1143 1143 name = self.path()
1144 1144 fnode = self._filenode
1145 1145 for p in self._changectx.parents():
1146 1146 try:
1147 1147 if fnode == p.filenode(name):
1148 1148 return None
1149 1149 except error.LookupError:
1150 1150 pass
1151 1151 return renamed
1152 1152
1153 1153 def children(self):
1154 1154 # hard for renames
1155 1155 c = self._filelog.children(self._filenode)
1156 1156 return [filectx(self._repo, self._path, fileid=x,
1157 1157 filelog=self._filelog) for x in c]
1158 1158
1159 1159 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1160 1160 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1161 1161 if diff from fctx2 to fctx1 has changes in linerange2 and
1162 1162 `linerange1` is the new line range for fctx1.
1163 1163 """
1164 1164 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1165 1165 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1166 1166 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1167 1167 return diffinrange, linerange1
1168 1168
1169 1169 def blockancestors(fctx, fromline, toline, followfirst=False):
1170 1170 """Yield ancestors of `fctx` with respect to the block of lines within
1171 1171 `fromline`-`toline` range.
1172 1172 """
1173 1173 diffopts = patch.diffopts(fctx._repo.ui)
1174 1174 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1175 1175 while visit:
1176 1176 c, linerange2 = visit.pop(max(visit))
1177 1177 pl = c.parents()
1178 1178 if followfirst:
1179 1179 pl = pl[:1]
1180 1180 if not pl:
1181 1181 # The block originates from the initial revision.
1182 yield c
1182 yield c, linerange2
1183 1183 continue
1184 1184 inrange = False
1185 1185 for p in pl:
1186 1186 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1187 1187 inrange = inrange or inrangep
1188 1188 if linerange1[0] == linerange1[1]:
1189 1189 # Parent's linerange is empty, meaning that the block got
1190 1190 # introduced in this revision; no need to go further in this
1191 1191 # branch.
1192 1192 continue
1193 1193 visit[p.linkrev(), p.filenode()] = p, linerange1
1194 1194 if inrange:
1195 yield c
1195 yield c, linerange2
1196 1196
1197 1197 class committablectx(basectx):
1198 1198 """A committablectx object provides common functionality for a context that
1199 1199 wants the ability to commit, e.g. workingctx or memctx."""
1200 1200 def __init__(self, repo, text="", user=None, date=None, extra=None,
1201 1201 changes=None):
1202 1202 self._repo = repo
1203 1203 self._rev = None
1204 1204 self._node = None
1205 1205 self._text = text
1206 1206 if date:
1207 1207 self._date = util.parsedate(date)
1208 1208 if user:
1209 1209 self._user = user
1210 1210 if changes:
1211 1211 self._status = changes
1212 1212
1213 1213 self._extra = {}
1214 1214 if extra:
1215 1215 self._extra = extra.copy()
1216 1216 if 'branch' not in self._extra:
1217 1217 try:
1218 1218 branch = encoding.fromlocal(self._repo.dirstate.branch())
1219 1219 except UnicodeDecodeError:
1220 1220 raise error.Abort(_('branch name not in UTF-8!'))
1221 1221 self._extra['branch'] = branch
1222 1222 if self._extra['branch'] == '':
1223 1223 self._extra['branch'] = 'default'
1224 1224
1225 1225 def __str__(self):
1226 1226 return str(self._parents[0]) + "+"
1227 1227
1228 1228 def __nonzero__(self):
1229 1229 return True
1230 1230
1231 1231 def _buildflagfunc(self):
1232 1232 # Create a fallback function for getting file flags when the
1233 1233 # filesystem doesn't support them
1234 1234
1235 1235 copiesget = self._repo.dirstate.copies().get
1236 1236 parents = self.parents()
1237 1237 if len(parents) < 2:
1238 1238 # when we have one parent, it's easy: copy from parent
1239 1239 man = parents[0].manifest()
1240 1240 def func(f):
1241 1241 f = copiesget(f, f)
1242 1242 return man.flags(f)
1243 1243 else:
1244 1244 # merges are tricky: we try to reconstruct the unstored
1245 1245 # result from the merge (issue1802)
1246 1246 p1, p2 = parents
1247 1247 pa = p1.ancestor(p2)
1248 1248 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1249 1249
1250 1250 def func(f):
1251 1251 f = copiesget(f, f) # may be wrong for merges with copies
1252 1252 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1253 1253 if fl1 == fl2:
1254 1254 return fl1
1255 1255 if fl1 == fla:
1256 1256 return fl2
1257 1257 if fl2 == fla:
1258 1258 return fl1
1259 1259 return '' # punt for conflicts
1260 1260
1261 1261 return func
1262 1262
1263 1263 @propertycache
1264 1264 def _flagfunc(self):
1265 1265 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1266 1266
1267 1267 @propertycache
1268 1268 def _manifest(self):
1269 1269 """generate a manifest corresponding to the values in self._status
1270 1270
1271 1271 This reuses the file nodeid from the parent, but appends an extra letter
1272 1272 when modified. Modified files get an extra 'm' while added files get
1273 1273 an extra 'a'. This is used by manifest merge to see that files
1274 1274 are different and by update logic to avoid deleting newly added files.
1275 1275 """
1276 1276 parents = self.parents()
1277 1277
1278 1278 man = parents[0].manifest().copy()
1279 1279
1280 1280 ff = self._flagfunc
1281 1281 for i, l in ((addednodeid, self._status.added),
1282 1282 (modifiednodeid, self._status.modified)):
1283 1283 for f in l:
1284 1284 man[f] = i
1285 1285 try:
1286 1286 man.setflag(f, ff(f))
1287 1287 except OSError:
1288 1288 pass
1289 1289
1290 1290 for f in self._status.deleted + self._status.removed:
1291 1291 if f in man:
1292 1292 del man[f]
1293 1293
1294 1294 return man
1295 1295
1296 1296 @propertycache
1297 1297 def _status(self):
1298 1298 return self._repo.status()
1299 1299
1300 1300 @propertycache
1301 1301 def _user(self):
1302 1302 return self._repo.ui.username()
1303 1303
1304 1304 @propertycache
1305 1305 def _date(self):
1306 1306 return util.makedate()
1307 1307
1308 1308 def subrev(self, subpath):
1309 1309 return None
1310 1310
1311 1311 def manifestnode(self):
1312 1312 return None
1313 1313 def user(self):
1314 1314 return self._user or self._repo.ui.username()
1315 1315 def date(self):
1316 1316 return self._date
1317 1317 def description(self):
1318 1318 return self._text
1319 1319 def files(self):
1320 1320 return sorted(self._status.modified + self._status.added +
1321 1321 self._status.removed)
1322 1322
1323 1323 def modified(self):
1324 1324 return self._status.modified
1325 1325 def added(self):
1326 1326 return self._status.added
1327 1327 def removed(self):
1328 1328 return self._status.removed
1329 1329 def deleted(self):
1330 1330 return self._status.deleted
1331 1331 def branch(self):
1332 1332 return encoding.tolocal(self._extra['branch'])
1333 1333 def closesbranch(self):
1334 1334 return 'close' in self._extra
1335 1335 def extra(self):
1336 1336 return self._extra
1337 1337
1338 1338 def tags(self):
1339 1339 return []
1340 1340
1341 1341 def bookmarks(self):
1342 1342 b = []
1343 1343 for p in self.parents():
1344 1344 b.extend(p.bookmarks())
1345 1345 return b
1346 1346
1347 1347 def phase(self):
1348 1348 phase = phases.draft # default phase to draft
1349 1349 for p in self.parents():
1350 1350 phase = max(phase, p.phase())
1351 1351 return phase
1352 1352
1353 1353 def hidden(self):
1354 1354 return False
1355 1355
1356 1356 def children(self):
1357 1357 return []
1358 1358
1359 1359 def flags(self, path):
1360 1360 if '_manifest' in self.__dict__:
1361 1361 try:
1362 1362 return self._manifest.flags(path)
1363 1363 except KeyError:
1364 1364 return ''
1365 1365
1366 1366 try:
1367 1367 return self._flagfunc(path)
1368 1368 except OSError:
1369 1369 return ''
1370 1370
1371 1371 def ancestor(self, c2):
1372 1372 """return the "best" ancestor context of self and c2"""
1373 1373 return self._parents[0].ancestor(c2) # punt on two parents for now
1374 1374
1375 1375 def walk(self, match):
1376 1376 '''Generates matching file names.'''
1377 1377 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1378 1378 True, False))
1379 1379
1380 1380 def matches(self, match):
1381 1381 return sorted(self._repo.dirstate.matches(match))
1382 1382
1383 1383 def ancestors(self):
1384 1384 for p in self._parents:
1385 1385 yield p
1386 1386 for a in self._repo.changelog.ancestors(
1387 1387 [p.rev() for p in self._parents]):
1388 1388 yield changectx(self._repo, a)
1389 1389
1390 1390 def markcommitted(self, node):
1391 1391 """Perform post-commit cleanup necessary after committing this ctx
1392 1392
1393 1393 Specifically, this updates backing stores this working context
1394 1394 wraps to reflect the fact that the changes reflected by this
1395 1395 workingctx have been committed. For example, it marks
1396 1396 modified and added files as normal in the dirstate.
1397 1397
1398 1398 """
1399 1399
1400 1400 self._repo.dirstate.beginparentchange()
1401 1401 for f in self.modified() + self.added():
1402 1402 self._repo.dirstate.normal(f)
1403 1403 for f in self.removed():
1404 1404 self._repo.dirstate.drop(f)
1405 1405 self._repo.dirstate.setparents(node)
1406 1406 self._repo.dirstate.endparentchange()
1407 1407
1408 1408 # write changes out explicitly, because nesting wlock at
1409 1409 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1410 1410 # from immediately doing so for subsequent changing files
1411 1411 self._repo.dirstate.write(self._repo.currenttransaction())
1412 1412
1413 1413 class workingctx(committablectx):
1414 1414 """A workingctx object makes access to data related to
1415 1415 the current working directory convenient.
1416 1416 date - any valid date string or (unixtime, offset), or None.
1417 1417 user - username string, or None.
1418 1418 extra - a dictionary of extra values, or None.
1419 1419 changes - a list of file lists as returned by localrepo.status()
1420 1420 or None to use the repository status.
1421 1421 """
1422 1422 def __init__(self, repo, text="", user=None, date=None, extra=None,
1423 1423 changes=None):
1424 1424 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1425 1425
1426 1426 def __iter__(self):
1427 1427 d = self._repo.dirstate
1428 1428 for f in d:
1429 1429 if d[f] != 'r':
1430 1430 yield f
1431 1431
1432 1432 def __contains__(self, key):
1433 1433 return self._repo.dirstate[key] not in "?r"
1434 1434
1435 1435 def hex(self):
1436 1436 return hex(wdirid)
1437 1437
1438 1438 @propertycache
1439 1439 def _parents(self):
1440 1440 p = self._repo.dirstate.parents()
1441 1441 if p[1] == nullid:
1442 1442 p = p[:-1]
1443 1443 return [changectx(self._repo, x) for x in p]
1444 1444
1445 1445 def filectx(self, path, filelog=None):
1446 1446 """get a file context from the working directory"""
1447 1447 return workingfilectx(self._repo, path, workingctx=self,
1448 1448 filelog=filelog)
1449 1449
1450 1450 def dirty(self, missing=False, merge=True, branch=True):
1451 1451 "check whether a working directory is modified"
1452 1452 # check subrepos first
1453 1453 for s in sorted(self.substate):
1454 1454 if self.sub(s).dirty():
1455 1455 return True
1456 1456 # check current working dir
1457 1457 return ((merge and self.p2()) or
1458 1458 (branch and self.branch() != self.p1().branch()) or
1459 1459 self.modified() or self.added() or self.removed() or
1460 1460 (missing and self.deleted()))
1461 1461
1462 1462 def add(self, list, prefix=""):
1463 1463 join = lambda f: os.path.join(prefix, f)
1464 1464 with self._repo.wlock():
1465 1465 ui, ds = self._repo.ui, self._repo.dirstate
1466 1466 rejected = []
1467 1467 lstat = self._repo.wvfs.lstat
1468 1468 for f in list:
1469 1469 scmutil.checkportable(ui, join(f))
1470 1470 try:
1471 1471 st = lstat(f)
1472 1472 except OSError:
1473 1473 ui.warn(_("%s does not exist!\n") % join(f))
1474 1474 rejected.append(f)
1475 1475 continue
1476 1476 if st.st_size > 10000000:
1477 1477 ui.warn(_("%s: up to %d MB of RAM may be required "
1478 1478 "to manage this file\n"
1479 1479 "(use 'hg revert %s' to cancel the "
1480 1480 "pending addition)\n")
1481 1481 % (f, 3 * st.st_size // 1000000, join(f)))
1482 1482 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1483 1483 ui.warn(_("%s not added: only files and symlinks "
1484 1484 "supported currently\n") % join(f))
1485 1485 rejected.append(f)
1486 1486 elif ds[f] in 'amn':
1487 1487 ui.warn(_("%s already tracked!\n") % join(f))
1488 1488 elif ds[f] == 'r':
1489 1489 ds.normallookup(f)
1490 1490 else:
1491 1491 ds.add(f)
1492 1492 return rejected
1493 1493
1494 1494 def forget(self, files, prefix=""):
1495 1495 join = lambda f: os.path.join(prefix, f)
1496 1496 with self._repo.wlock():
1497 1497 rejected = []
1498 1498 for f in files:
1499 1499 if f not in self._repo.dirstate:
1500 1500 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1501 1501 rejected.append(f)
1502 1502 elif self._repo.dirstate[f] != 'a':
1503 1503 self._repo.dirstate.remove(f)
1504 1504 else:
1505 1505 self._repo.dirstate.drop(f)
1506 1506 return rejected
1507 1507
1508 1508 def undelete(self, list):
1509 1509 pctxs = self.parents()
1510 1510 with self._repo.wlock():
1511 1511 for f in list:
1512 1512 if self._repo.dirstate[f] != 'r':
1513 1513 self._repo.ui.warn(_("%s not removed!\n") % f)
1514 1514 else:
1515 1515 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1516 1516 t = fctx.data()
1517 1517 self._repo.wwrite(f, t, fctx.flags())
1518 1518 self._repo.dirstate.normal(f)
1519 1519
1520 1520 def copy(self, source, dest):
1521 1521 try:
1522 1522 st = self._repo.wvfs.lstat(dest)
1523 1523 except OSError as err:
1524 1524 if err.errno != errno.ENOENT:
1525 1525 raise
1526 1526 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1527 1527 return
1528 1528 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1529 1529 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1530 1530 "symbolic link\n") % dest)
1531 1531 else:
1532 1532 with self._repo.wlock():
1533 1533 if self._repo.dirstate[dest] in '?':
1534 1534 self._repo.dirstate.add(dest)
1535 1535 elif self._repo.dirstate[dest] in 'r':
1536 1536 self._repo.dirstate.normallookup(dest)
1537 1537 self._repo.dirstate.copy(source, dest)
1538 1538
1539 1539 def match(self, pats=[], include=None, exclude=None, default='glob',
1540 1540 listsubrepos=False, badfn=None):
1541 1541 r = self._repo
1542 1542
1543 1543 # Only a case-insensitive filesystem needs magic to translate user input
1544 1544 # to the actual case in the filesystem.
1545 1545 if not util.fscasesensitive(r.root):
1546 1546 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1547 1547 exclude, default, r.auditor, self,
1548 1548 listsubrepos=listsubrepos,
1549 1549 badfn=badfn)
1550 1550 return matchmod.match(r.root, r.getcwd(), pats,
1551 1551 include, exclude, default,
1552 1552 auditor=r.auditor, ctx=self,
1553 1553 listsubrepos=listsubrepos, badfn=badfn)
1554 1554
1555 1555 def _filtersuspectsymlink(self, files):
1556 1556 if not files or self._repo.dirstate._checklink:
1557 1557 return files
1558 1558
1559 1559 # Symlink placeholders may get non-symlink-like contents
1560 1560 # via user error or dereferencing by NFS or Samba servers,
1561 1561 # so we filter out any placeholders that don't look like a
1562 1562 # symlink
1563 1563 sane = []
1564 1564 for f in files:
1565 1565 if self.flags(f) == 'l':
1566 1566 d = self[f].data()
1567 1567 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1568 1568 self._repo.ui.debug('ignoring suspect symlink placeholder'
1569 1569 ' "%s"\n' % f)
1570 1570 continue
1571 1571 sane.append(f)
1572 1572 return sane
1573 1573
1574 1574 def _checklookup(self, files):
1575 1575 # check for any possibly clean files
1576 1576 if not files:
1577 1577 return [], []
1578 1578
1579 1579 modified = []
1580 1580 fixup = []
1581 1581 pctx = self._parents[0]
1582 1582 # do a full compare of any files that might have changed
1583 1583 for f in sorted(files):
1584 1584 if (f not in pctx or self.flags(f) != pctx.flags(f)
1585 1585 or pctx[f].cmp(self[f])):
1586 1586 modified.append(f)
1587 1587 else:
1588 1588 fixup.append(f)
1589 1589
1590 1590 # update dirstate for files that are actually clean
1591 1591 if fixup:
1592 1592 try:
1593 1593 # updating the dirstate is optional
1594 1594 # so we don't wait on the lock
1595 1595 # wlock can invalidate the dirstate, so cache normal _after_
1596 1596 # taking the lock
1597 1597 with self._repo.wlock(False):
1598 1598 normal = self._repo.dirstate.normal
1599 1599 for f in fixup:
1600 1600 normal(f)
1601 1601 # write changes out explicitly, because nesting
1602 1602 # wlock at runtime may prevent 'wlock.release()'
1603 1603 # after this block from doing so for subsequent
1604 1604 # changing files
1605 1605 self._repo.dirstate.write(self._repo.currenttransaction())
1606 1606 except error.LockError:
1607 1607 pass
1608 1608 return modified, fixup
1609 1609
1610 1610 def _manifestmatches(self, match, s):
1611 1611 """Slow path for workingctx
1612 1612
1613 1613 The fast path is used when comparing the working directory to its
1614 1614 parent; this method handles comparison with a non-parent, so we need
1615 1615 to build a manifest and return what matches.
1616 1616 """
1617 1617 mf = self._repo['.']._manifestmatches(match, s)
1618 1618 for f in s.modified + s.added:
1619 1619 mf[f] = newnodeid
1620 1620 mf.setflag(f, self.flags(f))
1621 1621 for f in s.removed:
1622 1622 if f in mf:
1623 1623 del mf[f]
1624 1624 return mf
1625 1625
1626 1626 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1627 1627 unknown=False):
1628 1628 '''Gets the status from the dirstate -- internal use only.'''
1629 1629 listignored, listclean, listunknown = ignored, clean, unknown
1630 1630 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1631 1631 subrepos = []
1632 1632 if '.hgsub' in self:
1633 1633 subrepos = sorted(self.substate)
1634 1634 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1635 1635 listclean, listunknown)
1636 1636
1637 1637 # check for any possibly clean files
1638 1638 if cmp:
1639 1639 modified2, fixup = self._checklookup(cmp)
1640 1640 s.modified.extend(modified2)
1641 1641
1642 1642 # update dirstate for files that are actually clean
1643 1643 if fixup and listclean:
1644 1644 s.clean.extend(fixup)
1645 1645
1646 1646 if match.always():
1647 1647 # cache for performance
1648 1648 if s.unknown or s.ignored or s.clean:
1649 1649 # "_status" is cached with list*=False in the normal route
1650 1650 self._status = scmutil.status(s.modified, s.added, s.removed,
1651 1651 s.deleted, [], [], [])
1652 1652 else:
1653 1653 self._status = s
1654 1654
1655 1655 return s
1656 1656
1657 1657 def _buildstatus(self, other, s, match, listignored, listclean,
1658 1658 listunknown):
1659 1659 """build a status with respect to another context
1660 1660
1661 1661 This includes logic for maintaining the fast path of status when
1662 1662 comparing the working directory against its parent, which is to skip
1663 1663 building a new manifest if self (working directory) is not comparing
1664 1664 against its parent (repo['.']).
1665 1665 """
1666 1666 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1667 1667 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1668 1668 # might have accidentally ended up with the entire contents of the file
1669 1669 # they are supposed to be linking to.
1670 1670 s.modified[:] = self._filtersuspectsymlink(s.modified)
1671 1671 if other != self._repo['.']:
1672 1672 s = super(workingctx, self)._buildstatus(other, s, match,
1673 1673 listignored, listclean,
1674 1674 listunknown)
1675 1675 return s
1676 1676
1677 1677 def _matchstatus(self, other, match):
1678 1678 """override the match method with a filter for directory patterns
1679 1679
1680 1680 We use inheritance to customize the match.bad method only for
1681 1681 workingctx, since it applies only when comparing the working
1682 1682 directory against a non-parent changeset.
1683 1683 
1684 1684 If we are comparing against the working directory's parent, we just
1685 1685 use the default match object sent to us.
1686 1686 """
1687 1687 superself = super(workingctx, self)
1688 1688 match = superself._matchstatus(other, match)
1689 1689 if other != self._repo['.']:
1690 1690 def bad(f, msg):
1691 1691 # 'f' may be a directory pattern from 'match.files()',
1692 1692 # so 'f not in other' is not enough
1693 1693 if f not in other and not other.hasdir(f):
1694 1694 self._repo.ui.warn('%s: %s\n' %
1695 1695 (self._repo.dirstate.pathto(f), msg))
1696 1696 match.bad = bad
1697 1697 return match
1698 1698
1699 1699 class committablefilectx(basefilectx):
1700 1700 """A committablefilectx provides common functionality for a file context
1701 1701 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1702 1702 def __init__(self, repo, path, filelog=None, ctx=None):
1703 1703 self._repo = repo
1704 1704 self._path = path
1705 1705 self._changeid = None
1706 1706 self._filerev = self._filenode = None
1707 1707
1708 1708 if filelog is not None:
1709 1709 self._filelog = filelog
1710 1710 if ctx:
1711 1711 self._changectx = ctx
1712 1712
1713 1713 def __nonzero__(self):
1714 1714 return True
1715 1715
1716 1716 def linkrev(self):
1717 1717 # linked to self._changectx whether or not the file is modified
1718 1718 return self.rev()
1719 1719
1720 1720 def parents(self):
1721 1721 '''return parent filectxs, following copies if necessary'''
1722 1722 def filenode(ctx, path):
1723 1723 return ctx._manifest.get(path, nullid)
1724 1724
1725 1725 path = self._path
1726 1726 fl = self._filelog
1727 1727 pcl = self._changectx._parents
1728 1728 renamed = self.renamed()
1729 1729
1730 1730 if renamed:
1731 1731 pl = [renamed + (None,)]
1732 1732 else:
1733 1733 pl = [(path, filenode(pcl[0], path), fl)]
1734 1734
1735 1735 for pc in pcl[1:]:
1736 1736 pl.append((path, filenode(pc, path), fl))
1737 1737
1738 1738 return [self._parentfilectx(p, fileid=n, filelog=l)
1739 1739 for p, n, l in pl if n != nullid]
1740 1740
1741 1741 def children(self):
1742 1742 return []
1743 1743
1744 1744 class workingfilectx(committablefilectx):
1745 1745 """A workingfilectx object makes access to data related to a particular
1746 1746 file in the working directory convenient."""
1747 1747 def __init__(self, repo, path, filelog=None, workingctx=None):
1748 1748 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1749 1749
1750 1750 @propertycache
1751 1751 def _changectx(self):
1752 1752 return workingctx(self._repo)
1753 1753
1754 1754 def data(self):
1755 1755 return self._repo.wread(self._path)
1756 1756 def renamed(self):
1757 1757 rp = self._repo.dirstate.copied(self._path)
1758 1758 if not rp:
1759 1759 return None
1760 1760 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1761 1761
1762 1762 def size(self):
1763 1763 return self._repo.wvfs.lstat(self._path).st_size
1764 1764 def date(self):
1765 1765 t, tz = self._changectx.date()
1766 1766 try:
1767 1767 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1768 1768 except OSError as err:
1769 1769 if err.errno != errno.ENOENT:
1770 1770 raise
1771 1771 return (t, tz)
1772 1772
1773 1773 def cmp(self, fctx):
1774 1774 """compare with other file context
1775 1775
1776 1776 returns True if different than fctx.
1777 1777 """
1778 1778 # fctx should be a filectx (not a workingfilectx)
1779 1779 # invert comparison to reuse the same code path
1780 1780 return fctx.cmp(self)
1781 1781
1782 1782 def remove(self, ignoremissing=False):
1783 1783 """wraps unlink for a repo's working directory"""
1784 1784 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1785 1785
1786 1786 def write(self, data, flags):
1787 1787 """wraps repo.wwrite"""
1788 1788 self._repo.wwrite(self._path, data, flags)
1789 1789
1790 1790 class workingcommitctx(workingctx):
1791 1791 """A workingcommitctx object makes access to data related to
1792 1792 the revision being committed convenient.
1793 1793
1794 1794 This hides changes in the working directory if they aren't
1795 1795 committed in this context.
1796 1796 """
1797 1797 def __init__(self, repo, changes,
1798 1798 text="", user=None, date=None, extra=None):
1799 1799 super(workingctx, self).__init__(repo, text, user, date, extra,
1800 1800 changes)
1801 1801
1802 1802 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1803 1803 unknown=False):
1804 1804 """Return matched files only in ``self._status``
1805 1805
1806 1806 Uncommitted files appear "clean" via this context, even if
1807 1807 they aren't actually so in the working directory.
1808 1808 """
1809 1809 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1810 1810 if clean:
1811 1811 clean = [f for f in self._manifest if f not in self._changedset]
1812 1812 else:
1813 1813 clean = []
1814 1814 return scmutil.status([f for f in self._status.modified if match(f)],
1815 1815 [f for f in self._status.added if match(f)],
1816 1816 [f for f in self._status.removed if match(f)],
1817 1817 [], [], [], clean)
1818 1818
1819 1819 @propertycache
1820 1820 def _changedset(self):
1821 1821 """Return the set of files changed in this context
1822 1822 """
1823 1823 changed = set(self._status.modified)
1824 1824 changed.update(self._status.added)
1825 1825 changed.update(self._status.removed)
1826 1826 return changed
1827 1827
1828 1828 def makecachingfilectxfn(func):
1829 1829 """Create a filectxfn that caches based on the path.
1830 1830
1831 1831 We can't use util.cachefunc because it uses all arguments as the cache
1832 1832 key and this creates a cycle since the arguments include the repo and
1833 1833 memctx.
1834 1834 """
1835 1835 cache = {}
1836 1836
1837 1837 def getfilectx(repo, memctx, path):
1838 1838 if path not in cache:
1839 1839 cache[path] = func(repo, memctx, path)
1840 1840 return cache[path]
1841 1841
1842 1842 return getfilectx
1843 1843
1844 1844 class memctx(committablectx):
1845 1845 """Use memctx to perform in-memory commits via localrepo.commitctx().
1846 1846
1847 1847 Revision information is supplied at initialization time, while
1848 1848 related file data is made available through a callback
1849 1849 mechanism. 'repo' is the current localrepo, 'parents' is a
1850 1850 sequence of two parent revision identifiers (pass None for every
1851 1851 missing parent), 'text' is the commit message and 'files' lists
1852 1852 names of files touched by the revision (normalized and relative to
1853 1853 the repository root).
1854 1854
1855 1855 filectxfn(repo, memctx, path) is a callable receiving the
1856 1856 repository, the current memctx object and the normalized path of
1857 1857 the requested file, relative to the repository root. It is fired by the
1858 1858 commit function for every file in 'files', but the call order is
1859 1859 undefined. If the file is available in the revision being
1860 1860 committed (updated or added), filectxfn returns a memfilectx
1861 1861 object. If the file was removed, filectxfn raises an
1862 1862 IOError. Moved files are represented by marking the source file
1863 1863 removed and the new file added with copy information (see
1864 1864 memfilectx).
1865 1865
1866 1866 user receives the committer name and defaults to the current
1867 1867 repository username, date is the commit date in any format
1868 1868 supported by util.parsedate() and defaults to the current date,
1869 1869 extra is a dictionary of metadata or is left empty.
1870 1870 """
1871 1871
1872 1872 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1873 1873 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1874 1874 # this field to determine what to do in filectxfn.
1875 1875 _returnnoneformissingfiles = True
1876 1876
1877 1877 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1878 1878 date=None, extra=None, editor=False):
1879 1879 super(memctx, self).__init__(repo, text, user, date, extra)
1880 1880 self._rev = None
1881 1881 self._node = None
1882 1882 parents = [(p or nullid) for p in parents]
1883 1883 p1, p2 = parents
1884 1884 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1885 1885 files = sorted(set(files))
1886 1886 self._files = files
1887 1887 self.substate = {}
1888 1888
1889 1889 # if store is not callable, wrap it in a function
1890 1890 if not callable(filectxfn):
1891 1891 def getfilectx(repo, memctx, path):
1892 1892 fctx = filectxfn[path]
1893 1893 # this is weird but apparently we only keep track of one parent
1894 1894 # (why not only store that instead of a tuple?)
1895 1895 copied = fctx.renamed()
1896 1896 if copied:
1897 1897 copied = copied[0]
1898 1898 return memfilectx(repo, path, fctx.data(),
1899 1899 islink=fctx.islink(), isexec=fctx.isexec(),
1900 1900 copied=copied, memctx=memctx)
1901 1901 self._filectxfn = getfilectx
1902 1902 else:
1903 1903 # memoizing increases performance for e.g. vcs convert scenarios.
1904 1904 self._filectxfn = makecachingfilectxfn(filectxfn)
1905 1905
1906 1906 if extra:
1907 1907 self._extra = extra.copy()
1908 1908 else:
1909 1909 self._extra = {}
1910 1910
1911 1911 if self._extra.get('branch', '') == '':
1912 1912 self._extra['branch'] = 'default'
1913 1913
1914 1914 if editor:
1915 1915 self._text = editor(self._repo, self, [])
1916 1916 self._repo.savecommitmessage(self._text)
1917 1917
1918 1918 def filectx(self, path, filelog=None):
1919 1919 """get a file context from the working directory
1920 1920
1921 1921 Returns None if the file doesn't exist and should be removed."""
1922 1922 return self._filectxfn(self._repo, self, path)
1923 1923
1924 1924 def commit(self):
1925 1925 """commit context to the repo"""
1926 1926 return self._repo.commitctx(self)
1927 1927
1928 1928 @propertycache
1929 1929 def _manifest(self):
1930 1930 """generate a manifest based on the return values of filectxfn"""
1931 1931
1932 1932 # keep this simple for now; just worry about p1
1933 1933 pctx = self._parents[0]
1934 1934 man = pctx.manifest().copy()
1935 1935
1936 1936 for f in self._status.modified:
1937 1937 p1node = nullid
1938 1938 p2node = nullid
1939 1939 p = pctx[f].parents() # if file isn't in pctx, check p2?
1940 1940 if len(p) > 0:
1941 1941 p1node = p[0].filenode()
1942 1942 if len(p) > 1:
1943 1943 p2node = p[1].filenode()
1944 1944 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1945 1945
1946 1946 for f in self._status.added:
1947 1947 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1948 1948
1949 1949 for f in self._status.removed:
1950 1950 if f in man:
1951 1951 del man[f]
1952 1952
1953 1953 return man
1954 1954
1955 1955 @propertycache
1956 1956 def _status(self):
1957 1957 """Calculate exact status from ``files`` specified at construction
1958 1958 """
1959 1959 man1 = self.p1().manifest()
1960 1960 p2 = self._parents[1]
1961 1961 # "1 < len(self._parents)" can't be used for checking
1962 1962 # existence of the 2nd parent, because "memctx._parents" is
1963 1963 # explicitly initialized by the list, of which length is 2.
1964 1964 if p2.node() != nullid:
1965 1965 man2 = p2.manifest()
1966 1966 managing = lambda f: f in man1 or f in man2
1967 1967 else:
1968 1968 managing = lambda f: f in man1
1969 1969
1970 1970 modified, added, removed = [], [], []
1971 1971 for f in self._files:
1972 1972 if not managing(f):
1973 1973 added.append(f)
1974 1974 elif self[f]:
1975 1975 modified.append(f)
1976 1976 else:
1977 1977 removed.append(f)
1978 1978
1979 1979 return scmutil.status(modified, added, removed, [], [], [], [])
1980 1980
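# Editorial sketch: a minimal, hypothetical way an extension might drive the
# memctx machinery above. The function name, file name, content, and commit
# message are all made up for illustration; 'repo' is an existing localrepo.
def _examplememcommit(repo):
    def filectxfn(repo, memctx, path):
        # called once per entry in 'files'; returning a memfilectx marks the
        # file as present (added or modified) in the new revision
        return memfilectx(repo, path, 'example content\n', memctx=memctx)
    ctx = memctx(repo, [repo['.'].node(), None], 'example commit',
                 ['example.txt'], filectxfn,
                 user='editor <editor@example.com>')
    return ctx.commit()  # hands the context to localrepo.commitctx()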
1981 1981 class memfilectx(committablefilectx):
1982 1982 """memfilectx represents an in-memory file to commit.
1983 1983
1984 1984 See memctx and committablefilectx for more details.
1985 1985 """
1986 1986 def __init__(self, repo, path, data, islink=False,
1987 1987 isexec=False, copied=None, memctx=None):
1988 1988 """
1989 1989 path is the normalized file path relative to repository root.
1990 1990 data is the file content as a string.
1991 1991 islink is True if the file is a symbolic link.
1992 1992 isexec is True if the file is executable.
1993 1993 copied is the source file path if current file was copied in the
1994 1994 revision being committed, or None."""
1995 1995 super(memfilectx, self).__init__(repo, path, None, memctx)
1996 1996 self._data = data
1997 1997 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1998 1998 self._copied = None
1999 1999 if copied:
2000 2000 self._copied = (copied, nullid)
2001 2001
2002 2002 def data(self):
2003 2003 return self._data
2004 2004 def size(self):
2005 2005 return len(self.data())
2006 2006 def flags(self):
2007 2007 return self._flags
2008 2008 def renamed(self):
2009 2009 return self._copied
2010 2010
2011 2011 def remove(self, ignoremissing=False):
2012 2012 """wraps unlink for a repo's working directory"""
2013 2013 # need to figure out what to do here
2014 2014 del self._changectx[self._path]
2015 2015
2016 2016 def write(self, data, flags):
2017 2017 """wraps repo.wwrite"""
2018 2018 self._data = data
2019 2019
2020 2020 class metadataonlyctx(committablectx):
2021 2021 """Like memctx but it's reusing the manifest of different commit.
2022 2022 Intended to be used by lightweight operations that are creating
2023 2023 metadata-only changes.
2024 2024
2025 2025 Revision information is supplied at initialization time. 'repo' is the
2026 2026 current localrepo, 'ctx' is original revision which manifest we're reuisng
2027 2027 'parents' is a sequence of two parent revisions identifiers (pass None for
2028 2028 every missing parent), 'text' is the commit.
2029 2029
2030 2030 user receives the committer name and defaults to the current repository
2031 2031 username, date is the commit date in any format supported by
2032 2032 util.parsedate() and defaults to the current date, extra is a dictionary
2033 2033 of metadata or is left empty.
2034 2034 """
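    # Illustrative (hypothetical) use: rewording only the commit message of
    # an existing changectx 'ctx' while reusing its manifest:
    #
    #   new = metadataonlyctx(repo, ctx,
    #                         parents=(ctx.p1().node(), ctx.p2().node()),
    #                         text='reworded message', user=ctx.user(),
    #                         date=ctx.date(), extra=ctx.extra())
    #   newnode = new.commit()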
2035 2035 def __new__(cls, repo, originalctx, *args, **kwargs):
2036 2036 return super(metadataonlyctx, cls).__new__(cls, repo)
2037 2037
2038 2038 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2039 2039 extra=None, editor=False):
2040 2040 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2041 2041 self._rev = None
2042 2042 self._node = None
2043 2043 self._originalctx = originalctx
2044 2044 self._manifestnode = originalctx.manifestnode()
2045 2045 parents = [(p or nullid) for p in parents]
2046 2046 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2047 2047
2048 2048 # sanity check to ensure that the reused manifest parents are
2049 2049 # manifests of our commit parents
2050 2050 mp1, mp2 = self.manifestctx().parents
2051 2051 if p1.node() != nullid and p1.manifestctx().node() != mp1:
2052 2052 raise RuntimeError('can\'t reuse the manifest: '
2053 2053 'its p1 doesn\'t match the new ctx p1')
2054 2054 if p2.node() != nullid and p2.manifestctx().node() != mp2:
2055 2055 raise RuntimeError('can\'t reuse the manifest: '
2056 2056 'its p2 doesn\'t match the new ctx p2')
2057 2057
2058 2058 self._files = originalctx.files()
2059 2059 self.substate = {}
2060 2060
2061 2061 if extra:
2062 2062 self._extra = extra.copy()
2063 2063 else:
2064 2064 self._extra = {}
2065 2065
2066 2066 if self._extra.get('branch', '') == '':
2067 2067 self._extra['branch'] = 'default'
2068 2068
2069 2069 if editor:
2070 2070 self._text = editor(self._repo, self, [])
2071 2071 self._repo.savecommitmessage(self._text)
2072 2072
2073 2073 def manifestnode(self):
2074 2074 return self._manifestnode
2075 2075
2076 2076 @propertycache
2077 2077 def _manifestctx(self):
2078 2078 return self._repo.manifestlog[self._manifestnode]
2079 2079
2080 2080 def filectx(self, path, filelog=None):
2081 2081 return self._originalctx.filectx(path, filelog=filelog)
2082 2082
2083 2083 def commit(self):
2084 2084 """commit context to the repo"""
2085 2085 return self._repo.commitctx(self)
2086 2086
2087 2087 @property
2088 2088 def _manifest(self):
2089 2089 return self._originalctx.manifest()
2090 2090
2091 2091 @propertycache
2092 2092 def _status(self):
2093 2093 """Calculate exact status from ``files`` specified in the ``origctx``
2094 2094 and parents manifests.
2095 2095 """
2096 2096 man1 = self.p1().manifest()
2097 2097 p2 = self._parents[1]
2098 2098 # "1 < len(self._parents)" can't be used for checking
2099 2099 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2100 2100 # explicitly initialized by the list, of which length is 2.
2101 2101 if p2.node() != nullid:
2102 2102 man2 = p2.manifest()
2103 2103 managing = lambda f: f in man1 or f in man2
2104 2104 else:
2105 2105 managing = lambda f: f in man1
2106 2106
2107 2107 modified, added, removed = [], [], []
2108 2108 for f in self._files:
2109 2109 if not managing(f):
2110 2110 added.append(f)
2111 2111 elif self[f]:
2112 2112 modified.append(f)
2113 2113 else:
2114 2114 removed.append(f)
2115 2115
2116 2116 return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,2288 +1,2289 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 pathutil,
23 23 phases,
24 24 registrar,
25 25 repoview,
26 26 revsetlang,
27 27 smartset,
28 28 util,
29 29 )
30 30
31 31 # helpers for processing parsed tree
32 32 getsymbol = revsetlang.getsymbol
33 33 getstring = revsetlang.getstring
34 34 getinteger = revsetlang.getinteger
35 35 getlist = revsetlang.getlist
36 36 getrange = revsetlang.getrange
37 37 getargs = revsetlang.getargs
38 38 getargsdict = revsetlang.getargsdict
39 39
40 40 # constants used as an argument of match() and matchany()
41 41 anyorder = revsetlang.anyorder
42 42 defineorder = revsetlang.defineorder
43 43 followorder = revsetlang.followorder
44 44
45 45 baseset = smartset.baseset
46 46 generatorset = smartset.generatorset
47 47 spanset = smartset.spanset
48 48 fullreposet = smartset.fullreposet
49 49
50 50 def _revancestors(repo, revs, followfirst):
51 51 """Like revlog.ancestors(), but supports followfirst."""
52 52 if followfirst:
53 53 cut = 1
54 54 else:
55 55 cut = None
56 56 cl = repo.changelog
57 57
58 58 def iterate():
59 59 revs.sort(reverse=True)
60 60 irevs = iter(revs)
61 61 h = []
62 62
63 63 inputrev = next(irevs, None)
64 64 if inputrev is not None:
65 65 heapq.heappush(h, -inputrev)
66 66
67 67 seen = set()
68 68 while h:
69 69 current = -heapq.heappop(h)
70 70 if current == inputrev:
71 71 inputrev = next(irevs, None)
72 72 if inputrev is not None:
73 73 heapq.heappush(h, -inputrev)
74 74 if current not in seen:
75 75 seen.add(current)
76 76 yield current
77 77 for parent in cl.parentrevs(current)[:cut]:
78 78 if parent != node.nullrev:
79 79 heapq.heappush(h, -parent)
80 80
81 81 return generatorset(iterate(), iterasc=False)
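# Note on the negation trick above: heapq implements a min-heap, so pushing
# -rev and popping -heapq.heappop(h) yields revisions in descending order,
# which matches the iterasc=False declaration of the generatorset.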
82 82
83 83 def _revdescendants(repo, revs, followfirst):
84 84 """Like revlog.descendants() but supports followfirst."""
85 85 if followfirst:
86 86 cut = 1
87 87 else:
88 88 cut = None
89 89
90 90 def iterate():
91 91 cl = repo.changelog
92 92 # XXX this should be 'parentset.min()' assuming 'parentset' is a
93 93 # smartset (and if it is not, it should be)
94 94 first = min(revs)
95 95 nullrev = node.nullrev
96 96 if first == nullrev:
97 97 # Are there nodes with a null first parent and a non-null
98 98 # second one? Maybe. Do we care? Probably not.
99 99 for i in cl:
100 100 yield i
101 101 else:
102 102 seen = set(revs)
103 103 for i in cl.revs(first + 1):
104 104 for x in cl.parentrevs(i)[:cut]:
105 105 if x != nullrev and x in seen:
106 106 seen.add(i)
107 107 yield i
108 108 break
109 109
110 110 return generatorset(iterate(), iterasc=True)
111 111
112 112 def _reachablerootspure(repo, minroot, roots, heads, includepath):
113 113 """return (heads(::<roots> and ::<heads>))
114 114
115 115 If includepath is True, return (<roots>::<heads>)."""
116 116 if not roots:
117 117 return []
118 118 parentrevs = repo.changelog.parentrevs
119 119 roots = set(roots)
120 120 visit = list(heads)
121 121 reachable = set()
122 122 seen = {}
123 123 # prefetch all the things! (because python is slow)
124 124 reached = reachable.add
125 125 dovisit = visit.append
126 126 nextvisit = visit.pop
127 127 # open-code the post-order traversal due to the tiny size of
128 128 # sys.getrecursionlimit()
129 129 while visit:
130 130 rev = nextvisit()
131 131 if rev in roots:
132 132 reached(rev)
133 133 if not includepath:
134 134 continue
135 135 parents = parentrevs(rev)
136 136 seen[rev] = parents
137 137 for parent in parents:
138 138 if parent >= minroot and parent not in seen:
139 139 dovisit(parent)
140 140 if not reachable:
141 141 return baseset()
142 142 if not includepath:
143 143 return reachable
144 144 for rev in sorted(seen):
145 145 for parent in seen[rev]:
146 146 if parent in reachable:
147 147 reached(rev)
148 148 return reachable
149 149
150 150 def reachableroots(repo, roots, heads, includepath=False):
151 151 """return (heads(::<roots> and ::<heads>))
152 152
153 153 If includepath is True, return (<roots>::<heads>)."""
154 154 if not roots:
155 155 return baseset()
156 156 minroot = roots.min()
157 157 roots = list(roots)
158 158 heads = list(heads)
159 159 try:
160 160 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
161 161 except AttributeError:
162 162 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
163 163 revs = baseset(revs)
164 164 revs.sort()
165 165 return revs
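# For example, with a linear graph 0-1-2-3 (hypothetical numbers),
# reachableroots(repo, {1}, {3}) returns the heads of '::1 and ::3',
# i.e. {1}; with includepath=True it returns '1::3', i.e. {1, 2, 3}.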
166 166
167 167 # helpers
168 168
169 169 def getset(repo, subset, x):
170 170 if not x:
171 171 raise error.ParseError(_("missing argument"))
172 172 s = methods[x[0]](repo, subset, *x[1:])
173 173 if util.safehasattr(s, 'isascending'):
174 174 return s
175 175 # The else case should not happen, because all non-func nodes are
176 176 # internal; ignore it for now.
177 177 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
178 178 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
179 179 % x[1][1],
180 180 '3.9')
181 181 return baseset(s)
182 182
183 183 def _getrevsource(repo, r):
184 184 extra = repo[r].extra()
185 185 for label in ('source', 'transplant_source', 'rebase_source'):
186 186 if label in extra:
187 187 try:
188 188 return repo[extra[label]].rev()
189 189 except error.RepoLookupError:
190 190 pass
191 191 return None
192 192
193 193 # operator methods
194 194
195 195 def stringset(repo, subset, x):
196 196 x = repo[x].rev()
197 197 if (x in subset
198 198 or x == node.nullrev and isinstance(subset, fullreposet)):
199 199 return baseset([x])
200 200 return baseset()
201 201
202 202 def rangeset(repo, subset, x, y, order):
203 203 m = getset(repo, fullreposet(repo), x)
204 204 n = getset(repo, fullreposet(repo), y)
205 205
206 206 if not m or not n:
207 207 return baseset()
208 208 return _makerangeset(repo, subset, m.first(), n.last(), order)
209 209
210 210 def rangeall(repo, subset, x, order):
211 211 assert x is None
212 212 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
213 213
214 214 def rangepre(repo, subset, y, order):
215 215 # ':y' can't be rewritten to '0:y' since '0' may be hidden
216 216 n = getset(repo, fullreposet(repo), y)
217 217 if not n:
218 218 return baseset()
219 219 return _makerangeset(repo, subset, 0, n.last(), order)
220 220
221 221 def rangepost(repo, subset, x, order):
222 222 m = getset(repo, fullreposet(repo), x)
223 223 if not m:
224 224 return baseset()
225 225 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
226 226
227 227 def _makerangeset(repo, subset, m, n, order):
228 228 if m == n:
229 229 r = baseset([m])
230 230 elif n == node.wdirrev:
231 231 r = spanset(repo, m, len(repo)) + baseset([n])
232 232 elif m == node.wdirrev:
233 233 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
234 234 elif m < n:
235 235 r = spanset(repo, m, n + 1)
236 236 else:
237 237 r = spanset(repo, m, n - 1)
238 238
239 239 if order == defineorder:
240 240 return r & subset
241 241 else:
242 242 # carrying the sorting over when possible would be more efficient
243 243 return subset & r
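# For example (hypothetical revision numbers): '2:5' builds
# spanset(repo, 2, 6), iterating 2, 3, 4, 5 in ascending order, while '5:2'
# builds spanset(repo, 5, 1), the same revisions in descending order.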
244 244
245 245 def dagrange(repo, subset, x, y, order):
246 246 r = fullreposet(repo)
247 247 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
248 248 includepath=True)
249 249 return subset & xs
250 250
251 251 def andset(repo, subset, x, y, order):
252 252 return getset(repo, getset(repo, subset, x), y)
253 253
254 254 def differenceset(repo, subset, x, y, order):
255 255 return getset(repo, subset, x) - getset(repo, subset, y)
256 256
257 257 def _orsetlist(repo, subset, xs):
258 258 assert xs
259 259 if len(xs) == 1:
260 260 return getset(repo, subset, xs[0])
261 261 p = len(xs) // 2
262 262 a = _orsetlist(repo, subset, xs[:p])
263 263 b = _orsetlist(repo, subset, xs[p:])
264 264 return a + b
265 265
266 266 def orset(repo, subset, x, order):
267 267 xs = getlist(x)
268 268 if order == followorder:
269 269 # slow path to take the subset order
270 270 return subset & _orsetlist(repo, fullreposet(repo), xs)
271 271 else:
272 272 return _orsetlist(repo, subset, xs)
273 273
274 274 def notset(repo, subset, x, order):
275 275 return subset - getset(repo, subset, x)
276 276
277 277 def listset(repo, subset, *xs):
278 278 raise error.ParseError(_("can't use a list in this context"),
279 279 hint=_('see hg help "revsets.x or y"'))
280 280
281 281 def keyvaluepair(repo, subset, k, v):
282 282 raise error.ParseError(_("can't use a key-value pair in this context"))
283 283
284 284 def func(repo, subset, a, b, order):
285 285 f = getsymbol(a)
286 286 if f in symbols:
287 287 func = symbols[f]
288 288 if getattr(func, '_takeorder', False):
289 289 return func(repo, subset, b, order)
290 290 return func(repo, subset, b)
291 291
292 292 keep = lambda fn: getattr(fn, '__doc__', None) is not None
293 293
294 294 syms = [s for (s, fn) in symbols.items() if keep(fn)]
295 295 raise error.UnknownIdentifier(f, syms)
296 296
297 297 # functions
298 298
299 299 # symbols are callables like:
300 300 # fn(repo, subset, x)
301 301 # with:
302 302 # repo - current repository instance
303 303 # subset - of revisions to be examined
304 304 # x - argument in tree form
305 305 symbols = {}
306 306
307 307 # symbols which can't be used for a DoS attack for any given input
308 308 # (e.g. those which accept regexes as plain strings shouldn't be included)
309 309 # functions that just return a lot of changesets (like all) don't count here
310 310 safesymbols = set()
311 311
312 312 predicate = registrar.revsetpredicate()
313 313
314 314 @predicate('_destupdate')
315 315 def _destupdate(repo, subset, x):
316 316 # experimental revset for update destination
317 317 args = getargsdict(x, 'limit', 'clean')
318 318 return subset & baseset([destutil.destupdate(repo, **args)[0]])
319 319
320 320 @predicate('_destmerge')
321 321 def _destmerge(repo, subset, x):
322 322 # experimental revset for merge destination
323 323 sourceset = None
324 324 if x is not None:
325 325 sourceset = getset(repo, fullreposet(repo), x)
326 326 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
327 327
328 328 @predicate('adds(pattern)', safe=True)
329 329 def adds(repo, subset, x):
330 330 """Changesets that add a file matching pattern.
331 331
332 332 The pattern without explicit kind like ``glob:`` is expected to be
333 333 relative to the current directory and match against a file or a
334 334 directory.
335 335 """
336 336 # i18n: "adds" is a keyword
337 337 pat = getstring(x, _("adds requires a pattern"))
338 338 return checkstatus(repo, subset, pat, 1)
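# Note: the final argument of checkstatus() indexes the status tuple
# returned by repo.status() -- 0 is modified, 1 is added, 2 is removed --
# so adds() inspects the 'added' field.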
339 339
340 340 @predicate('ancestor(*changeset)', safe=True)
341 341 def ancestor(repo, subset, x):
342 342 """A greatest common ancestor of the changesets.
343 343
344 344 Accepts 0 or more changesets.
345 345 Returns an empty set when passed no args.
346 346 The greatest common ancestor of a single changeset is that changeset.
347 347 """
348 348 # i18n: "ancestor" is a keyword
349 349 l = getlist(x)
350 350 rl = fullreposet(repo)
351 351 anc = None
352 352
353 353 # (getset(repo, rl, i) for i in l) generates a sequence of revision sets
354 354 for revs in (getset(repo, rl, i) for i in l):
355 355 for r in revs:
356 356 if anc is None:
357 357 anc = repo[r]
358 358 else:
359 359 anc = anc.ancestor(repo[r])
360 360
361 361 if anc is not None and anc.rev() in subset:
362 362 return baseset([anc.rev()])
363 363 return baseset()
364 364
365 365 def _ancestors(repo, subset, x, followfirst=False):
366 366 heads = getset(repo, fullreposet(repo), x)
367 367 if not heads:
368 368 return baseset()
369 369 s = _revancestors(repo, heads, followfirst)
370 370 return subset & s
371 371
372 372 @predicate('ancestors(set)', safe=True)
373 373 def ancestors(repo, subset, x):
374 374 """Changesets that are ancestors of a changeset in set.
375 375 """
376 376 return _ancestors(repo, subset, x)
377 377
378 378 @predicate('_firstancestors', safe=True)
379 379 def _firstancestors(repo, subset, x):
380 380 # ``_firstancestors(set)``
381 381 # Like ``ancestors(set)`` but follows only the first parents.
382 382 return _ancestors(repo, subset, x, followfirst=True)
383 383
384 384 def ancestorspec(repo, subset, x, n, order):
385 385 """``set~n``
386 386 Changesets that are the Nth ancestor (first parents only) of a changeset
387 387 in set.
388 388 """
389 389 n = getinteger(n, _("~ expects a number"))
390 390 ps = set()
391 391 cl = repo.changelog
392 392 for r in getset(repo, fullreposet(repo), x):
393 393 for i in range(n):
394 394 r = cl.parentrevs(r)[0]
395 395 ps.add(r)
396 396 return subset & ps
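# For example, '.~2' selects the grandparent of the working directory's
# parent, following first parents only: given a linear history 0-1-2-3 with
# '.' at 3, "hg log -r '.~2'" selects revision 1. (Hypothetical numbers.)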
397 397
398 398 @predicate('author(string)', safe=True)
399 399 def author(repo, subset, x):
400 400 """Alias for ``user(string)``.
401 401 """
402 402 # i18n: "author" is a keyword
403 403 n = getstring(x, _("author requires a string"))
404 404 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
405 405 return subset.filter(lambda x: matcher(repo[x].user()),
406 406 condrepr=('<user %r>', n))
407 407
408 408 @predicate('bisect(string)', safe=True)
409 409 def bisect(repo, subset, x):
410 410 """Changesets marked in the specified bisect status:
411 411
412 412 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
413 413 - ``goods``, ``bads`` : csets topologically good/bad
414 414 - ``range`` : csets taking part in the bisection
415 415 - ``pruned`` : csets that are goods, bads or skipped
416 416 - ``untested`` : csets whose fate is yet unknown
417 417 - ``ignored`` : csets ignored due to DAG topology
418 418 - ``current`` : the cset currently being bisected
419 419 """
420 420 # i18n: "bisect" is a keyword
421 421 status = getstring(x, _("bisect requires a string")).lower()
422 422 state = set(hbisect.get(repo, status))
423 423 return subset & state
424 424
425 425 # Backward-compatibility
426 426 # - no help entry so that we do not advertise it any more
427 427 @predicate('bisected', safe=True)
428 428 def bisected(repo, subset, x):
429 429 return bisect(repo, subset, x)
430 430
431 431 @predicate('bookmark([name])', safe=True)
432 432 def bookmark(repo, subset, x):
433 433 """The named bookmark or all bookmarks.
434 434
435 435 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
436 436 """
437 437 # i18n: "bookmark" is a keyword
438 438 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
439 439 if args:
440 440 bm = getstring(args[0],
441 441 # i18n: "bookmark" is a keyword
442 442 _('the argument to bookmark must be a string'))
443 443 kind, pattern, matcher = util.stringmatcher(bm)
444 444 bms = set()
445 445 if kind == 'literal':
446 446 bmrev = repo._bookmarks.get(pattern, None)
447 447 if not bmrev:
448 448 raise error.RepoLookupError(_("bookmark '%s' does not exist")
449 449 % pattern)
450 450 bms.add(repo[bmrev].rev())
451 451 else:
452 452 matchrevs = set()
453 453 for name, bmrev in repo._bookmarks.iteritems():
454 454 if matcher(name):
455 455 matchrevs.add(bmrev)
456 456 if not matchrevs:
457 457 raise error.RepoLookupError(_("no bookmarks exist"
458 458 " that match '%s'") % pattern)
459 459 for bmrev in matchrevs:
460 460 bms.add(repo[bmrev].rev())
461 461 else:
462 462 bms = set([repo[r].rev()
463 463 for r in repo._bookmarks.values()])
464 464 bms -= set([node.nullrev])
465 465 return subset & bms
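# Illustrative usage (hypothetical names): "hg log -r 'bookmark()'" selects
# all bookmarked revisions, while "hg log -r 'bookmark("re:stable.*")'"
# matches bookmark names against a regular expression.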
466 466
467 467 @predicate('branch(string or set)', safe=True)
468 468 def branch(repo, subset, x):
469 469 """
470 470 All changesets belonging to the given branch or the branches of the given
471 471 changesets.
472 472
473 473 Pattern matching is supported for `string`. See
474 474 :hg:`help revisions.patterns`.
475 475 """
476 476 getbi = repo.revbranchcache().branchinfo
477 477
478 478 try:
479 479 b = getstring(x, '')
480 480 except error.ParseError:
481 481 # not a string, but another revspec, e.g. tip()
482 482 pass
483 483 else:
484 484 kind, pattern, matcher = util.stringmatcher(b)
485 485 if kind == 'literal':
486 486 # note: falls through to the revspec case if no branch with
487 487 # this name exists and pattern kind is not specified explicitly
488 488 if pattern in repo.branchmap():
489 489 return subset.filter(lambda r: matcher(getbi(r)[0]),
490 490 condrepr=('<branch %r>', b))
491 491 if b.startswith('literal:'):
492 492 raise error.RepoLookupError(_("branch '%s' does not exist")
493 493 % pattern)
494 494 else:
495 495 return subset.filter(lambda r: matcher(getbi(r)[0]),
496 496 condrepr=('<branch %r>', b))
497 497
498 498 s = getset(repo, fullreposet(repo), x)
499 499 b = set()
500 500 for r in s:
501 501 b.add(getbi(r)[0])
502 502 c = s.__contains__
503 503 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
504 504 condrepr=lambda: '<branch %r>' % sorted(b))
505 505
506 506 @predicate('bumped()', safe=True)
507 507 def bumped(repo, subset, x):
508 508 """Mutable changesets marked as successors of public changesets.
509 509
510 510 Only non-public and non-obsolete changesets can be `bumped`.
511 511 """
512 512 # i18n: "bumped" is a keyword
513 513 getargs(x, 0, 0, _("bumped takes no arguments"))
514 514 bumped = obsmod.getrevs(repo, 'bumped')
515 515 return subset & bumped
516 516
517 517 @predicate('bundle()', safe=True)
518 518 def bundle(repo, subset, x):
519 519 """Changesets in the bundle.
520 520
521 521 Bundle must be specified by the -R option."""
522 522
523 523 try:
524 524 bundlerevs = repo.changelog.bundlerevs
525 525 except AttributeError:
526 526 raise error.Abort(_("no bundle provided - specify with -R"))
527 527 return subset & bundlerevs
528 528
529 529 def checkstatus(repo, subset, pat, field):
530 530 hasset = matchmod.patkind(pat) == 'set'
531 531
532 532 mcache = [None]
533 533 def matches(x):
534 534 c = repo[x]
535 535 if not mcache[0] or hasset:
536 536 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
537 537 m = mcache[0]
538 538 fname = None
539 539 if not m.anypats() and len(m.files()) == 1:
540 540 fname = m.files()[0]
541 541 if fname is not None:
542 542 if fname not in c.files():
543 543 return False
544 544 else:
545 545 for f in c.files():
546 546 if m(f):
547 547 break
548 548 else:
549 549 return False
550 550 files = repo.status(c.p1().node(), c.node())[field]
551 551 if fname is not None:
552 552 if fname in files:
553 553 return True
554 554 else:
555 555 for f in files:
556 556 if m(f):
557 557 return True
558 558
559 559 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
560 560
561 561 def _children(repo, subset, parentset):
562 562 if not parentset:
563 563 return baseset()
564 564 cs = set()
565 565 pr = repo.changelog.parentrevs
566 566 minrev = parentset.min()
567 567 nullrev = node.nullrev
568 568 for r in subset:
569 569 if r <= minrev:
570 570 continue
571 571 p1, p2 = pr(r)
572 572 if p1 in parentset:
573 573 cs.add(r)
574 574 if p2 != nullrev and p2 in parentset:
575 575 cs.add(r)
576 576 return baseset(cs)
577 577
578 578 @predicate('children(set)', safe=True)
579 579 def children(repo, subset, x):
580 580 """Child changesets of changesets in set.
581 581 """
582 582 s = getset(repo, fullreposet(repo), x)
583 583 cs = _children(repo, subset, s)
584 584 return subset & cs
585 585
586 586 @predicate('closed()', safe=True)
587 587 def closed(repo, subset, x):
588 588 """Changeset is closed.
589 589 """
590 590 # i18n: "closed" is a keyword
591 591 getargs(x, 0, 0, _("closed takes no arguments"))
592 592 return subset.filter(lambda r: repo[r].closesbranch(),
593 593 condrepr='<branch closed>')
594 594
595 595 @predicate('contains(pattern)')
596 596 def contains(repo, subset, x):
597 597 """The revision's manifest contains a file matching pattern (but might not
598 598 modify it). See :hg:`help patterns` for information about file patterns.
599 599
600 600 The pattern without explicit kind like ``glob:`` is expected to be
601 601 relative to the current directory and match against a file exactly
602 602 for efficiency.
603 603 """
604 604 # i18n: "contains" is a keyword
605 605 pat = getstring(x, _("contains requires a pattern"))
606 606
607 607 def matches(x):
608 608 if not matchmod.patkind(pat):
609 609 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
610 610 if pats in repo[x]:
611 611 return True
612 612 else:
613 613 c = repo[x]
614 614 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
615 615 for f in c.manifest():
616 616 if m(f):
617 617 return True
618 618 return False
619 619
620 620 return subset.filter(matches, condrepr=('<contains %r>', pat))
621 621
622 622 @predicate('converted([id])', safe=True)
623 623 def converted(repo, subset, x):
624 624 """Changesets converted from the given identifier in the old repository if
625 625 present, or all converted changesets if no identifier is specified.
626 626 """
627 627
628 628 # There is exactly no chance of resolving the revision, so do a simple
629 629 # string compare and hope for the best
630 630
631 631 rev = None
632 632 # i18n: "converted" is a keyword
633 633 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
634 634 if l:
635 635 # i18n: "converted" is a keyword
636 636 rev = getstring(l[0], _('converted requires a revision'))
637 637
638 638 def _matchvalue(r):
639 639 source = repo[r].extra().get('convert_revision', None)
640 640 return source is not None and (rev is None or source.startswith(rev))
641 641
642 642 return subset.filter(lambda r: _matchvalue(r),
643 643 condrepr=('<converted %r>', rev))
644 644
645 645 @predicate('date(interval)', safe=True)
646 646 def date(repo, subset, x):
647 647 """Changesets within the interval, see :hg:`help dates`.
648 648 """
649 649 # i18n: "date" is a keyword
650 650 ds = getstring(x, _("date requires a string"))
651 651 dm = util.matchdate(ds)
652 652 return subset.filter(lambda x: dm(repo[x].date()[0]),
653 653 condrepr=('<date %r>', ds))
654 654
655 655 @predicate('desc(string)', safe=True)
656 656 def desc(repo, subset, x):
657 657 """Search commit message for string. The match is case-insensitive.
658 658
659 659 Pattern matching is supported for `string`. See
660 660 :hg:`help revisions.patterns`.
661 661 """
662 662 # i18n: "desc" is a keyword
663 663 ds = getstring(x, _("desc requires a string"))
664 664
665 665 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
666 666
667 667 return subset.filter(lambda r: matcher(repo[r].description()),
668 668 condrepr=('<desc %r>', ds))
669 669
670 670 def _descendants(repo, subset, x, followfirst=False):
671 671 roots = getset(repo, fullreposet(repo), x)
672 672 if not roots:
673 673 return baseset()
674 674 s = _revdescendants(repo, roots, followfirst)
675 675
676 676 # Both sets need to be ascending in order to lazily return the union
677 677 # in the correct order.
678 678 base = subset & roots
679 679 desc = subset & s
680 680 result = base + desc
681 681 if subset.isascending():
682 682 result.sort()
683 683 elif subset.isdescending():
684 684 result.sort(reverse=True)
685 685 else:
686 686 result = subset & result
687 687 return result
688 688
689 689 @predicate('descendants(set)', safe=True)
690 690 def descendants(repo, subset, x):
691 691 """Changesets which are descendants of changesets in set.
692 692 """
693 693 return _descendants(repo, subset, x)
694 694
695 695 @predicate('_firstdescendants', safe=True)
696 696 def _firstdescendants(repo, subset, x):
697 697 # ``_firstdescendants(set)``
698 698 # Like ``descendants(set)`` but follows only the first parents.
699 699 return _descendants(repo, subset, x, followfirst=True)
700 700
701 701 @predicate('destination([set])', safe=True)
702 702 def destination(repo, subset, x):
703 703 """Changesets that were created by a graft, transplant or rebase operation,
704 704 with the given revisions specified as the source. Omitting the optional set
705 705 is the same as passing all().
706 706 """
707 707 if x is not None:
708 708 sources = getset(repo, fullreposet(repo), x)
709 709 else:
710 710 sources = fullreposet(repo)
711 711
712 712 dests = set()
713 713
714 714 # subset contains all of the possible destinations that can be returned, so
715 715 # iterate over them and see if their source(s) were provided in the arg set.
716 716 # Even if the immediate src of r is not in the arg set, src's source (or
717 717 # further back) may be. Scanning back further than the immediate src allows
718 718 # transitive transplants and rebases to yield the same results as transitive
719 719 # grafts.
720 720 for r in subset:
721 721 src = _getrevsource(repo, r)
722 722 lineage = None
723 723
724 724 while src is not None:
725 725 if lineage is None:
726 726 lineage = list()
727 727
728 728 lineage.append(r)
729 729
730 730 # The visited lineage is a match if the current source is in the arg
731 731 # set. Since every candidate dest is visited by way of iterating
732 732 # subset, any dests further back in the lineage will be tested by a
733 733 # different iteration over subset. Likewise, if the src was already
734 734 # selected, the current lineage can be selected without going back
735 735 # further.
736 736 if src in sources or src in dests:
737 737 dests.update(lineage)
738 738 break
739 739
740 740 r = src
741 741 src = _getrevsource(repo, r)
742 742
743 743 return subset.filter(dests.__contains__,
744 744 condrepr=lambda: '<destination %r>' % sorted(dests))
745 745
746 746 @predicate('divergent()', safe=True)
747 747 def divergent(repo, subset, x):
748 748 """
749 749 Final successors of changesets with an alternative set of final successors.
750 750 """
751 751 # i18n: "divergent" is a keyword
752 752 getargs(x, 0, 0, _("divergent takes no arguments"))
753 753 divergent = obsmod.getrevs(repo, 'divergent')
754 754 return subset & divergent
755 755
756 756 @predicate('extinct()', safe=True)
757 757 def extinct(repo, subset, x):
758 758 """Obsolete changesets with obsolete descendants only.
759 759 """
760 760 # i18n: "extinct" is a keyword
761 761 getargs(x, 0, 0, _("extinct takes no arguments"))
762 762 extincts = obsmod.getrevs(repo, 'extinct')
763 763 return subset & extincts
764 764
765 765 @predicate('extra(label, [value])', safe=True)
766 766 def extra(repo, subset, x):
767 767 """Changesets with the given label in the extra metadata, with the given
768 768 optional value.
769 769
770 770 Pattern matching is supported for `value`. See
771 771 :hg:`help revisions.patterns`.
772 772 """
773 773 args = getargsdict(x, 'extra', 'label value')
774 774 if 'label' not in args:
775 775 # i18n: "extra" is a keyword
776 776 raise error.ParseError(_('extra takes at least 1 argument'))
777 777 # i18n: "extra" is a keyword
778 778 label = getstring(args['label'], _('first argument to extra must be '
779 779 'a string'))
780 780 value = None
781 781
782 782 if 'value' in args:
783 783 # i18n: "extra" is a keyword
784 784 value = getstring(args['value'], _('second argument to extra must be '
785 785 'a string'))
786 786 kind, value, matcher = util.stringmatcher(value)
787 787
788 788 def _matchvalue(r):
789 789 extra = repo[r].extra()
790 790 return label in extra and (value is None or matcher(extra[label]))
791 791
792 792 return subset.filter(lambda r: _matchvalue(r),
793 793 condrepr=('<extra[%r] %r>', label, value))
794 794
795 795 @predicate('filelog(pattern)', safe=True)
796 796 def filelog(repo, subset, x):
797 797 """Changesets connected to the specified filelog.
798 798
799 799 For performance reasons, visits only revisions mentioned in the file-level
800 800 filelog, rather than filtering through all changesets (much faster, but
801 801 doesn't include deletes or duplicate changes). For a slower, more accurate
802 802 result, use ``file()``.
803 803
804 804 The pattern without explicit kind like ``glob:`` is expected to be
805 805 relative to the current directory and match against a file exactly
806 806 for efficiency.
807 807
808 808 If some linkrev points to revisions filtered by the current repoview, we'll
809 809 work around it to return a non-filtered value.
810 810 """
811 811
812 812 # i18n: "filelog" is a keyword
813 813 pat = getstring(x, _("filelog requires a pattern"))
814 814 s = set()
815 815 cl = repo.changelog
816 816
817 817 if not matchmod.patkind(pat):
818 818 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
819 819 files = [f]
820 820 else:
821 821 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
822 822 files = (f for f in repo[None] if m(f))
823 823
824 824 for f in files:
825 825 fl = repo.file(f)
826 826 known = {}
827 827 scanpos = 0
828 828 for fr in list(fl):
829 829 fn = fl.node(fr)
830 830 if fn in known:
831 831 s.add(known[fn])
832 832 continue
833 833
834 834 lr = fl.linkrev(fr)
835 835 if lr in cl:
836 836 s.add(lr)
837 837 elif scanpos is not None:
838 838 # lowest matching changeset is filtered, scan further
839 839 # ahead in changelog
840 840 start = max(lr, scanpos) + 1
841 841 scanpos = None
842 842 for r in cl.revs(start):
843 843 # minimize parsing of non-matching entries
844 844 if f in cl.revision(r) and f in cl.readfiles(r):
845 845 try:
846 846 # try to use manifest delta fastpath
847 847 n = repo[r].filenode(f)
848 848 if n not in known:
849 849 if n == fn:
850 850 s.add(r)
851 851 scanpos = r
852 852 break
853 853 else:
854 854 known[n] = r
855 855 except error.ManifestLookupError:
856 856 # deletion in changelog
857 857 continue
858 858
859 859 return subset & s
860 860
861 861 @predicate('first(set, [n])', safe=True)
862 862 def first(repo, subset, x):
863 863 """An alias for limit().
864 864 """
865 865 return limit(repo, subset, x)
866 866
867 867 def _follow(repo, subset, x, name, followfirst=False):
868 868 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
869 869 "and an optional revset") % name)
870 870 c = repo['.']
871 871 if l:
872 872 x = getstring(l[0], _("%s expected a pattern") % name)
873 873 rev = None
874 874 if len(l) >= 2:
875 875 revs = getset(repo, fullreposet(repo), l[1])
876 876 if len(revs) != 1:
877 877 raise error.RepoLookupError(
878 878 _("%s expected one starting revision") % name)
879 879 rev = revs.last()
880 880 c = repo[rev]
881 881 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
882 882 ctx=repo[rev], default='path')
883 883
884 884 files = c.manifest().walk(matcher)
885 885
886 886 s = set()
887 887 for fname in files:
888 888 fctx = c[fname]
889 889 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
890 890 # include the revision responsible for the most recent version
891 891 s.add(fctx.introrev())
892 892 else:
893 893 s = _revancestors(repo, baseset([c.rev()]), followfirst)
894 894
895 895 return subset & s
896 896
897 897 @predicate('follow([pattern[, startrev]])', safe=True)
898 898 def follow(repo, subset, x):
899 899 """
900 900 An alias for ``::.`` (ancestors of the working directory's first parent).
901 901 If pattern is specified, the histories of files matching given
902 902 pattern in the revision given by startrev are followed, including copies.
903 903 """
904 904 return _follow(repo, subset, x, 'follow')
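# Illustrative usage (hypothetical file and revision):
#
#   hg log -r 'follow()'            # same as: hg log -r '::.'
#   hg log -r 'follow(README, 42)'  # history of README as of revision 42,
#                                   # including copies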
905 905
906 906 @predicate('_followfirst', safe=True)
907 907 def _followfirst(repo, subset, x):
908 908 # ``followfirst([pattern[, startrev]])``
909 909 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
910 910 # of every revision or file revision.
911 911 return _follow(repo, subset, x, '_followfirst', followfirst=True)
912 912
913 913 @predicate('followlines(file, fromline:toline[, startrev=.])', safe=True)
914 914 def followlines(repo, subset, x):
915 915 """Changesets modifying `file` in line range ('fromline', 'toline').
916 916
917 917 The line range corresponds to 'file' content at 'startrev' and should
918 918 hence be consistent with the file size. If startrev is not specified,
919 919 the working directory's parent is used.
920 920 """
921 921 from . import context # avoid circular import issues
922 922
923 923 args = getargsdict(x, 'followlines', 'file *lines startrev')
924 924 if len(args['lines']) != 1:
925 925 raise error.ParseError(_("followlines requires a line range"))
926 926
927 927 rev = '.'
928 928 if 'startrev' in args:
929 929 revs = getset(repo, fullreposet(repo), args['startrev'])
930 930 if len(revs) != 1:
931 931 raise error.ParseError(
932 932 _("followlines expects exactly one revision"))
933 933 rev = revs.last()
934 934
935 935 pat = getstring(args['file'], _("followlines requires a pattern"))
936 936 if not matchmod.patkind(pat):
937 937 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
938 938 else:
939 939 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
940 940 files = [f for f in repo[rev] if m(f)]
941 941 if len(files) != 1:
942 942 raise error.ParseError(_("followlines expects exactly one file"))
943 943 fname = files[0]
944 944
945 945 lr = getrange(args['lines'][0], _("followlines expects a line range"))
946 946 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
947 947 for a in lr]
948 948 if toline - fromline < 0:
949 949 raise error.ParseError(_("line range must be positive"))
950 950 if fromline < 1:
951 951 raise error.ParseError(_("fromline must be strictly positive"))
952 952 fromline -= 1
953 953
954 954 fctx = repo[rev].filectx(fname)
955 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
955 revs = (c.rev() for c, _linerange
956 in context.blockancestors(fctx, fromline, toline))
956 957 return subset & generatorset(revs, iterasc=False)
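# Illustrative usage of the predicate above (file name, line numbers, and
# revision are hypothetical):
#
#   hg log -r 'followlines(mercurial/context.py, 100:110)'
#   hg log -r 'followlines(mercurial/context.py, 100:110, startrev=42)'
#
# blockancestors() yields (fctx, linerange) pairs, so the generator above
# unpacks each pair and keeps only the ancestor's revision number.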
957 958
958 959 @predicate('all()', safe=True)
959 960 def getall(repo, subset, x):
960 961 """All changesets, the same as ``0:tip``.
961 962 """
962 963 # i18n: "all" is a keyword
963 964 getargs(x, 0, 0, _("all takes no arguments"))
964 965 return subset & spanset(repo) # drop "null" if any
965 966
966 967 @predicate('grep(regex)')
967 968 def grep(repo, subset, x):
968 969 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
969 970 to ensure special escape characters are handled correctly. Unlike
970 971 ``keyword(string)``, the match is case-sensitive.
971 972 """
972 973 try:
973 974 # i18n: "grep" is a keyword
974 975 gr = re.compile(getstring(x, _("grep requires a string")))
975 976 except re.error as e:
976 977 raise error.ParseError(_('invalid match pattern: %s') % e)
977 978
978 979 def matches(x):
979 980 c = repo[x]
980 981 for e in c.files() + [c.user(), c.description()]:
981 982 if gr.search(e):
982 983 return True
983 984 return False
984 985
985 986 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
986 987
987 988 @predicate('_matchfiles', safe=True)
988 989 def _matchfiles(repo, subset, x):
989 990 # _matchfiles takes a revset list of prefixed arguments:
990 991 #
991 992 # [p:foo, i:bar, x:baz]
992 993 #
993 994 # builds a match object from them and filters subset. Allowed
994 995 # prefixes are 'p:' for regular patterns, 'i:' for include
995 996 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
996 997 # a revision identifier, or the empty string to reference the
997 998 # working directory, from which the match object is
998 999 # initialized. Use 'd:' to set the default matching mode, default
999 1000 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
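#
# For example (hypothetical values), the argument list
#   ['p:relglob:*.py', 'i:path:mercurial', 'x:path:tests']
# keeps revisions touching Python files anywhere in the tree, limited to
# paths under 'mercurial' and excluding paths under 'tests'.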
1000 1001
1001 1002 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1002 1003 pats, inc, exc = [], [], []
1003 1004 rev, default = None, None
1004 1005 for arg in l:
1005 1006 s = getstring(arg, "_matchfiles requires string arguments")
1006 1007 prefix, value = s[:2], s[2:]
1007 1008 if prefix == 'p:':
1008 1009 pats.append(value)
1009 1010 elif prefix == 'i:':
1010 1011 inc.append(value)
1011 1012 elif prefix == 'x:':
1012 1013 exc.append(value)
1013 1014 elif prefix == 'r:':
1014 1015 if rev is not None:
1015 1016 raise error.ParseError('_matchfiles expected at most one '
1016 1017 'revision')
1017 1018 if value != '': # empty means working directory; leave rev as None
1018 1019 rev = value
1019 1020 elif prefix == 'd:':
1020 1021 if default is not None:
1021 1022 raise error.ParseError('_matchfiles expected at most one '
1022 1023 'default mode')
1023 1024 default = value
1024 1025 else:
1025 1026 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1026 1027 if not default:
1027 1028 default = 'glob'
1028 1029
1029 1030 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1030 1031 exclude=exc, ctx=repo[rev], default=default)
1031 1032
1032 1033 # This directly reads the changelog data, as creating a changectx for
1033 1034 # every revision is quite expensive.
1034 1035 getfiles = repo.changelog.readfiles
1035 1036 wdirrev = node.wdirrev
1036 1037 def matches(x):
1037 1038 if x == wdirrev:
1038 1039 files = repo[x].files()
1039 1040 else:
1040 1041 files = getfiles(x)
1041 1042 for f in files:
1042 1043 if m(f):
1043 1044 return True
1044 1045 return False
1045 1046
1046 1047 return subset.filter(matches,
1047 1048 condrepr=('<matchfiles patterns=%r, include=%r '
1048 1049 'exclude=%r, default=%r, rev=%r>',
1049 1050 pats, inc, exc, default, rev))
1050 1051
1051 1052 @predicate('file(pattern)', safe=True)
1052 1053 def hasfile(repo, subset, x):
1053 1054 """Changesets affecting files matched by pattern.
1054 1055
1055 1056 For a faster but less accurate result, consider using ``filelog()``
1056 1057 instead.
1057 1058
1058 1059 This predicate uses ``glob:`` as the default kind of pattern.
1059 1060 """
1060 1061 # i18n: "file" is a keyword
1061 1062 pat = getstring(x, _("file requires a pattern"))
1062 1063 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1063 1064
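# A hedged example (editor's addition), relying on the default glob kind:
#
#   file("**.py")
#
# selects changesets touching any Python file; per the code above it is
# rewritten to _matchfiles('p:**.py').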
1064 1065 @predicate('head()', safe=True)
1065 1066 def head(repo, subset, x):
1066 1067 """Changeset is a named branch head.
1067 1068 """
1068 1069 # i18n: "head" is a keyword
1069 1070 getargs(x, 0, 0, _("head takes no arguments"))
1070 1071 hs = set()
1071 1072 cl = repo.changelog
1072 1073 for ls in repo.branchmap().itervalues():
1073 1074 hs.update(cl.rev(h) for h in ls)
1074 1075 return subset & baseset(hs)
1075 1076
1076 1077 @predicate('heads(set)', safe=True)
1077 1078 def heads(repo, subset, x):
1078 1079 """Members of set with no children in set.
1079 1080 """
1080 1081 s = getset(repo, subset, x)
1081 1082 ps = parents(repo, subset, x)
1082 1083 return s - ps
1083 1084
1084 1085 @predicate('hidden()', safe=True)
1085 1086 def hidden(repo, subset, x):
1086 1087 """Hidden changesets.
1087 1088 """
1088 1089 # i18n: "hidden" is a keyword
1089 1090 getargs(x, 0, 0, _("hidden takes no arguments"))
1090 1091 hiddenrevs = repoview.filterrevs(repo, 'visible')
1091 1092 return subset & hiddenrevs
1092 1093
1093 1094 @predicate('keyword(string)', safe=True)
1094 1095 def keyword(repo, subset, x):
1095 1096 """Search commit message, user name, and names of changed files for
1096 1097 string. The match is case-insensitive.
1097 1098
1098 1099 For a regular expression or case-sensitive search of these fields, use
1099 1100 ``grep(regex)``.
1100 1101 """
1101 1102 # i18n: "keyword" is a keyword
1102 1103 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1103 1104
1104 1105 def matches(r):
1105 1106 c = repo[r]
1106 1107 return any(kw in encoding.lower(t)
1107 1108 for t in c.files() + [c.user(), c.description()])
1108 1109
1109 1110 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1110 1111
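# A hedged example (editor's addition):
#
#   keyword(bug)
#
# matches any changeset whose description, user name, or file names contain
# the substring "bug", ignoring case.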
1111 1112 @predicate('limit(set[, n[, offset]])', safe=True)
1112 1113 def limit(repo, subset, x):
1113 1114 """First n members of set, defaulting to 1, starting from offset.
1114 1115 """
1115 1116 args = getargsdict(x, 'limit', 'set n offset')
1116 1117 if 'set' not in args:
1117 1118 # i18n: "limit" is a keyword
1118 1119 raise error.ParseError(_("limit requires one to three arguments"))
1119 1120 # i18n: "limit" is a keyword
1120 1121 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1121 1122 # i18n: "limit" is a keyword
1122 1123 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1123 1124 if ofs < 0:
1124 1125 raise error.ParseError(_("negative offset"))
1125 1126 os = getset(repo, fullreposet(repo), args['set'])
1126 1127 result = []
1127 1128 it = iter(os)
1128 1129 for x in xrange(ofs):
1129 1130 y = next(it, None)
1130 1131 if y is None:
1131 1132 break
1132 1133 for x in xrange(lim):
1133 1134 y = next(it, None)
1134 1135 if y is None:
1135 1136 break
1136 1137 elif y in subset:
1137 1138 result.append(y)
1138 1139 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1139 1140 lim, ofs, subset, os))
1140 1141
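# A worked example (editor's addition), assuming the repo has at least 13
# revisions:
#
#   limit(all(), 3, 10)
#
# skips the first ten members of all() and yields the next three, i.e.
# revisions 10, 11 and 12.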
1141 1142 @predicate('last(set, [n])', safe=True)
1142 1143 def last(repo, subset, x):
1143 1144 """Last n members of set, defaulting to 1.
1144 1145 """
1145 1146 # i18n: "last" is a keyword
1146 1147 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1147 1148 lim = 1
1148 1149 if len(l) == 2:
1149 1150 # i18n: "last" is a keyword
1150 1151 lim = getinteger(l[1], _("last expects a number"))
1151 1152 os = getset(repo, fullreposet(repo), l[0])
1152 1153 os.reverse()
1153 1154 result = []
1154 1155 it = iter(os)
1155 1156 for x in xrange(lim):
1156 1157 y = next(it, None)
1157 1158 if y is None:
1158 1159 break
1159 1160 elif y in subset:
1160 1161 result.append(y)
1161 1162 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1162 1163
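# A hedged example (editor's addition):
#
#   last(public(), 2)
#
# returns the two public changesets with the highest revision numbers,
# since the input set is reversed before the first n members are taken.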
1163 1164 @predicate('max(set)', safe=True)
1164 1165 def maxrev(repo, subset, x):
1165 1166 """Changeset with highest revision number in set.
1166 1167 """
1167 1168 os = getset(repo, fullreposet(repo), x)
1168 1169 try:
1169 1170 m = os.max()
1170 1171 if m in subset:
1171 1172 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1172 1173 except ValueError:
1173 1174 # os.max() throws a ValueError when the collection is empty.
1174 1175 # Same as python's max().
1175 1176 pass
1176 1177 return baseset(datarepr=('<max %r, %r>', subset, os))
1177 1178
1178 1179 @predicate('merge()', safe=True)
1179 1180 def merge(repo, subset, x):
1180 1181 """Changeset is a merge changeset.
1181 1182 """
1182 1183 # i18n: "merge" is a keyword
1183 1184 getargs(x, 0, 0, _("merge takes no arguments"))
1184 1185 cl = repo.changelog
1185 1186 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1186 1187 condrepr='<merge>')
1187 1188
1188 1189 @predicate('branchpoint()', safe=True)
1189 1190 def branchpoint(repo, subset, x):
1190 1191 """Changesets with more than one child.
1191 1192 """
1192 1193 # i18n: "branchpoint" is a keyword
1193 1194 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1194 1195 cl = repo.changelog
1195 1196 if not subset:
1196 1197 return baseset()
1197 1198 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1198 1199 # (and if it is not, it should be.)
1199 1200 baserev = min(subset)
1200 1201 parentscount = [0]*(len(repo) - baserev)
1201 1202 for r in cl.revs(start=baserev + 1):
1202 1203 for p in cl.parentrevs(r):
1203 1204 if p >= baserev:
1204 1205 parentscount[p - baserev] += 1
1205 1206 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1206 1207 condrepr='<branchpoint>')
1207 1208
1208 1209 @predicate('min(set)', safe=True)
1209 1210 def minrev(repo, subset, x):
1210 1211 """Changeset with lowest revision number in set.
1211 1212 """
1212 1213 os = getset(repo, fullreposet(repo), x)
1213 1214 try:
1214 1215 m = os.min()
1215 1216 if m in subset:
1216 1217 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1217 1218 except ValueError:
1218 1219 # os.min() throws a ValueError when the collection is empty.
1219 1220 # Same as python's min().
1220 1221 pass
1221 1222 return baseset(datarepr=('<min %r, %r>', subset, os))
1222 1223
1223 1224 @predicate('modifies(pattern)', safe=True)
1224 1225 def modifies(repo, subset, x):
1225 1226 """Changesets modifying files matched by pattern.
1226 1227
1227 1228 The pattern without explicit kind like ``glob:`` is expected to be
1228 1229 relative to the current directory and match against a file or a
1229 1230 directory.
1230 1231 """
1231 1232 # i18n: "modifies" is a keyword
1232 1233 pat = getstring(x, _("modifies requires a pattern"))
1233 1234 return checkstatus(repo, subset, pat, 0)
1234 1235
1235 1236 @predicate('named(namespace)')
1236 1237 def named(repo, subset, x):
1237 1238 """The changesets in a given namespace.
1238 1239
1239 1240 Pattern matching is supported for `namespace`. See
1240 1241 :hg:`help revisions.patterns`.
1241 1242 """
1242 1243 # i18n: "named" is a keyword
1243 1244 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1244 1245
1245 1246 ns = getstring(args[0],
1246 1247 # i18n: "named" is a keyword
1247 1248 _('the argument to named must be a string'))
1248 1249 kind, pattern, matcher = util.stringmatcher(ns)
1249 1250 namespaces = set()
1250 1251 if kind == 'literal':
1251 1252 if pattern not in repo.names:
1252 1253 raise error.RepoLookupError(_("namespace '%s' does not exist")
1253 1254 % ns)
1254 1255 namespaces.add(repo.names[pattern])
1255 1256 else:
1256 1257 for name, ns in repo.names.iteritems():
1257 1258 if matcher(name):
1258 1259 namespaces.add(ns)
1259 1260 if not namespaces:
1260 1261 raise error.RepoLookupError(_("no namespace exists"
1261 1262 " that matches '%s'") % pattern)
1262 1263
1263 1264 names = set()
1264 1265 for ns in namespaces:
1265 1266 for name in ns.listnames(repo):
1266 1267 if name not in ns.deprecated:
1267 1268 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1268 1269
1269 1270 names -= set([node.nullrev])
1270 1271 return subset & names
1271 1272
1272 1273 @predicate('id(string)', safe=True)
1273 1274 def node_(repo, subset, x):
1274 1275 """Revision non-ambiguously specified by the given hex string prefix.
1275 1276 """
1276 1277 # i18n: "id" is a keyword
1277 1278 l = getargs(x, 1, 1, _("id requires one argument"))
1278 1279 # i18n: "id" is a keyword
1279 1280 n = getstring(l[0], _("id requires a string"))
1280 1281 if len(n) == 40:
1281 1282 try:
1282 1283 rn = repo.changelog.rev(node.bin(n))
1283 1284 except (LookupError, TypeError):
1284 1285 rn = None
1285 1286 else:
1286 1287 rn = None
1287 1288 pm = repo.changelog._partialmatch(n)
1288 1289 if pm is not None:
1289 1290 rn = repo.changelog.rev(pm)
1290 1291
1291 1292 if rn is None:
1292 1293 return baseset()
1293 1294 result = baseset([rn])
1294 1295 return result & subset
1295 1296
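# A hedged example (editor's addition; the prefix is hypothetical):
#
#   id(d2e1b9c)
#
# resolves the changeset whose hex node starts with d2e1b9c, or the empty
# set if no node carries that prefix; an ambiguous prefix raises an error
# inside _partialmatch.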
1296 1297 @predicate('obsolete()', safe=True)
1297 1298 def obsolete(repo, subset, x):
1298 1299 """Mutable changeset with a newer version."""
1299 1300 # i18n: "obsolete" is a keyword
1300 1301 getargs(x, 0, 0, _("obsolete takes no arguments"))
1301 1302 obsoletes = obsmod.getrevs(repo, 'obsolete')
1302 1303 return subset & obsoletes
1303 1304
1304 1305 @predicate('only(set, [set])', safe=True)
1305 1306 def only(repo, subset, x):
1306 1307 """Changesets that are ancestors of the first set that are not ancestors
1307 1308 of any other head in the repo. If a second set is specified, the result
1308 1309 is ancestors of the first set that are not ancestors of the second set
1309 1310 (i.e. ::<set1> - ::<set2>).
1310 1311 """
1311 1312 cl = repo.changelog
1312 1313 # i18n: "only" is a keyword
1313 1314 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1314 1315 include = getset(repo, fullreposet(repo), args[0])
1315 1316 if len(args) == 1:
1316 1317 if not include:
1317 1318 return baseset()
1318 1319
1319 1320 descendants = set(_revdescendants(repo, include, False))
1320 1321 exclude = [rev for rev in cl.headrevs()
1321 1322 if rev not in descendants and rev not in include]
1322 1323 else:
1323 1324 exclude = getset(repo, fullreposet(repo), args[1])
1324 1325
1325 1326 results = set(cl.findmissingrevs(common=exclude, heads=include))
1326 1327 # XXX we should turn this into a baseset instead of a set, smartset may do
1327 1328 # some optimizations from the fact this is a baseset.
1328 1329 return subset & results
1329 1330
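# A hedged example (editor's addition; the names are hypothetical):
#
#   only(feature, default)
#
# is equivalent to ::feature - ::default, per the docstring above.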
1330 1331 @predicate('origin([set])', safe=True)
1331 1332 def origin(repo, subset, x):
1332 1333 """
1333 1334 Changesets that were specified as a source for the grafts, transplants or
1334 1335 rebases that created the given revisions. Omitting the optional set is the
1335 1336 same as passing all(). If a changeset created by these operations is itself
1336 1337 specified as a source for one of these operations, only the source changeset
1337 1338 for the first operation is selected.
1338 1339 """
1339 1340 if x is not None:
1340 1341 dests = getset(repo, fullreposet(repo), x)
1341 1342 else:
1342 1343 dests = fullreposet(repo)
1343 1344
1344 1345 def _firstsrc(rev):
1345 1346 src = _getrevsource(repo, rev)
1346 1347 if src is None:
1347 1348 return None
1348 1349
1349 1350 while True:
1350 1351 prev = _getrevsource(repo, src)
1351 1352
1352 1353 if prev is None:
1353 1354 return src
1354 1355 src = prev
1355 1356
1356 1357 o = set([_firstsrc(r) for r in dests])
1357 1358 o -= set([None])
1358 1359 # XXX we should turn this into a baseset instead of a set, smartset may do
1359 1360 # some optimizations from the fact this is a baseset.
1360 1361 return subset & o
1361 1362
1362 1363 @predicate('outgoing([path])', safe=False)
1363 1364 def outgoing(repo, subset, x):
1364 1365 """Changesets not found in the specified destination repository, or the
1365 1366 default push location.
1366 1367 """
1367 1368 # Avoid cycles.
1368 1369 from . import (
1369 1370 discovery,
1370 1371 hg,
1371 1372 )
1372 1373 # i18n: "outgoing" is a keyword
1373 1374 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1374 1375 # i18n: "outgoing" is a keyword
1375 1376 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1376 1377 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1377 1378 dest, branches = hg.parseurl(dest)
1378 1379 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1379 1380 if revs:
1380 1381 revs = [repo.lookup(rev) for rev in revs]
1381 1382 other = hg.peer(repo, {}, dest)
1382 1383 repo.ui.pushbuffer()
1383 1384 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1384 1385 repo.ui.popbuffer()
1385 1386 cl = repo.changelog
1386 1387 o = set([cl.rev(r) for r in outgoing.missing])
1387 1388 return subset & o
1388 1389
1389 1390 @predicate('p1([set])', safe=True)
1390 1391 def p1(repo, subset, x):
1391 1392 """First parent of changesets in set, or the working directory.
1392 1393 """
1393 1394 if x is None:
1394 1395 p = repo[x].p1().rev()
1395 1396 if p >= 0:
1396 1397 return subset & baseset([p])
1397 1398 return baseset()
1398 1399
1399 1400 ps = set()
1400 1401 cl = repo.changelog
1401 1402 for r in getset(repo, fullreposet(repo), x):
1402 1403 ps.add(cl.parentrevs(r)[0])
1403 1404 ps -= set([node.nullrev])
1404 1405 # XXX we should turn this into a baseset instead of a set, smartset may do
1405 1406 # some optimizations from the fact this is a baseset.
1406 1407 return subset & ps
1407 1408
1408 1409 @predicate('p2([set])', safe=True)
1409 1410 def p2(repo, subset, x):
1410 1411 """Second parent of changesets in set, or the working directory.
1411 1412 """
1412 1413 if x is None:
1413 1414 ps = repo[x].parents()
1414 1415 try:
1415 1416 p = ps[1].rev()
1416 1417 if p >= 0:
1417 1418 return subset & baseset([p])
1418 1419 return baseset()
1419 1420 except IndexError:
1420 1421 return baseset()
1421 1422
1422 1423 ps = set()
1423 1424 cl = repo.changelog
1424 1425 for r in getset(repo, fullreposet(repo), x):
1425 1426 ps.add(cl.parentrevs(r)[1])
1426 1427 ps -= set([node.nullrev])
1427 1428 # XXX we should turn this into a baseset instead of a set, smartset may do
1428 1429 # some optimizations from the fact this is a baseset.
1429 1430 return subset & ps
1430 1431
1431 1432 def parentpost(repo, subset, x, order):
1432 1433 return p1(repo, subset, x)
1433 1434
1434 1435 @predicate('parents([set])', safe=True)
1435 1436 def parents(repo, subset, x):
1436 1437 """
1437 1438 The set of all parents for all changesets in set, or the working directory.
1438 1439 """
1439 1440 if x is None:
1440 1441 ps = set(p.rev() for p in repo[x].parents())
1441 1442 else:
1442 1443 ps = set()
1443 1444 cl = repo.changelog
1444 1445 up = ps.update
1445 1446 parentrevs = cl.parentrevs
1446 1447 for r in getset(repo, fullreposet(repo), x):
1447 1448 if r == node.wdirrev:
1448 1449 up(p.rev() for p in repo[r].parents())
1449 1450 else:
1450 1451 up(parentrevs(r))
1451 1452 ps -= set([node.nullrev])
1452 1453 return subset & ps
1453 1454
1454 1455 def _phase(repo, subset, *targets):
1455 1456 """helper to select all rev in <targets> phases"""
1456 1457 s = repo._phasecache.getrevset(repo, targets)
1457 1458 return subset & s
1458 1459
1459 1460 @predicate('draft()', safe=True)
1460 1461 def draft(repo, subset, x):
1461 1462 """Changeset in draft phase."""
1462 1463 # i18n: "draft" is a keyword
1463 1464 getargs(x, 0, 0, _("draft takes no arguments"))
1464 1465 target = phases.draft
1465 1466 return _phase(repo, subset, target)
1466 1467
1467 1468 @predicate('secret()', safe=True)
1468 1469 def secret(repo, subset, x):
1469 1470 """Changeset in secret phase."""
1470 1471 # i18n: "secret" is a keyword
1471 1472 getargs(x, 0, 0, _("secret takes no arguments"))
1472 1473 target = phases.secret
1473 1474 return _phase(repo, subset, target)
1474 1475
1475 1476 def parentspec(repo, subset, x, n, order):
1476 1477 """``set^0``
1477 1478 The set.
1478 1479 ``set^1`` (or ``set^``), ``set^2``
1479 1480 First or second parent, respectively, of all changesets in set.
1480 1481 """
1481 1482 try:
1482 1483 n = int(n[1])
1483 1484 if n not in (0, 1, 2):
1484 1485 raise ValueError
1485 1486 except (TypeError, ValueError):
1486 1487 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1487 1488 ps = set()
1488 1489 cl = repo.changelog
1489 1490 for r in getset(repo, fullreposet(repo), x):
1490 1491 if n == 0:
1491 1492 ps.add(r)
1492 1493 elif n == 1:
1493 1494 ps.add(cl.parentrevs(r)[0])
1494 1495 elif n == 2:
1495 1496 parents = cl.parentrevs(r)
1496 1497 if parents[1] != node.nullrev:
1497 1498 ps.add(parents[1])
1498 1499 return subset & ps
1499 1500
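# Hedged examples (editor's addition) for the ^ operator handled above:
#
#   tip^0   tip itself
#   tip^    first parent of tip (same as tip^1)
#   tip^2   second parent of tip; empty when tip is not a merge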
1500 1501 @predicate('present(set)', safe=True)
1501 1502 def present(repo, subset, x):
1502 1503 """An empty set, if any revision in set isn't found; otherwise,
1503 1504 all revisions in set.
1504 1505
1505 1506 If any of the specified revisions is not present in the local repository,
1506 1507 the query is normally aborted. But this predicate allows the query
1507 1508 to continue even in such cases.
1508 1509 """
1509 1510 try:
1510 1511 return getset(repo, subset, x)
1511 1512 except error.RepoLookupError:
1512 1513 return baseset()
1513 1514
1514 1515 # for internal use
1515 1516 @predicate('_notpublic', safe=True)
1516 1517 def _notpublic(repo, subset, x):
1517 1518 getargs(x, 0, 0, "_notpublic takes no arguments")
1518 1519 return _phase(repo, subset, phases.draft, phases.secret)
1519 1520
1520 1521 @predicate('public()', safe=True)
1521 1522 def public(repo, subset, x):
1522 1523 """Changeset in public phase."""
1523 1524 # i18n: "public" is a keyword
1524 1525 getargs(x, 0, 0, _("public takes no arguments"))
1525 1526 phase = repo._phasecache.phase
1526 1527 target = phases.public
1527 1528 condition = lambda r: phase(repo, r) == target
1528 1529 return subset.filter(condition, condrepr=('<phase %r>', target),
1529 1530 cache=False)
1530 1531
1531 1532 @predicate('remote([id [,path]])', safe=False)
1532 1533 def remote(repo, subset, x):
1533 1534 """Local revision that corresponds to the given identifier in a
1534 1535 remote repository, if present. Here, the '.' identifier is a
1535 1536 synonym for the current local branch.
1536 1537 """
1537 1538
1538 1539 from . import hg # avoid start-up nasties
1539 1540 # i18n: "remote" is a keyword
1540 1541 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1541 1542
1542 1543 q = '.'
1543 1544 if len(l) > 0:
1544 1545 # i18n: "remote" is a keyword
1545 1546 q = getstring(l[0], _("remote requires a string id"))
1546 1547 if q == '.':
1547 1548 q = repo['.'].branch()
1548 1549
1549 1550 dest = ''
1550 1551 if len(l) > 1:
1551 1552 # i18n: "remote" is a keyword
1552 1553 dest = getstring(l[1], _("remote requires a repository path"))
1553 1554 dest = repo.ui.expandpath(dest or 'default')
1554 1555 dest, branches = hg.parseurl(dest)
1555 1556 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1556 1557 if revs:
1557 1558 revs = [repo.lookup(rev) for rev in revs]
1558 1559 other = hg.peer(repo, {}, dest)
1559 1560 n = other.lookup(q)
1560 1561 if n in repo:
1561 1562 r = repo[n].rev()
1562 1563 if r in subset:
1563 1564 return baseset([r])
1564 1565 return baseset()
1565 1566
1566 1567 @predicate('removes(pattern)', safe=True)
1567 1568 def removes(repo, subset, x):
1568 1569 """Changesets which remove files matching pattern.
1569 1570
1570 1571 The pattern without explicit kind like ``glob:`` is expected to be
1571 1572 relative to the current directory and match against a file or a
1572 1573 directory.
1573 1574 """
1574 1575 # i18n: "removes" is a keyword
1575 1576 pat = getstring(x, _("removes requires a pattern"))
1576 1577 return checkstatus(repo, subset, pat, 2)
1577 1578
1578 1579 @predicate('rev(number)', safe=True)
1579 1580 def rev(repo, subset, x):
1580 1581 """Revision with the given numeric identifier.
1581 1582 """
1582 1583 # i18n: "rev" is a keyword
1583 1584 l = getargs(x, 1, 1, _("rev requires one argument"))
1584 1585 try:
1585 1586 # i18n: "rev" is a keyword
1586 1587 l = int(getstring(l[0], _("rev requires a number")))
1587 1588 except (TypeError, ValueError):
1588 1589 # i18n: "rev" is a keyword
1589 1590 raise error.ParseError(_("rev expects a number"))
1590 1591 if l not in repo.changelog and l != node.nullrev:
1591 1592 return baseset()
1592 1593 return subset & baseset([l])
1593 1594
1594 1595 @predicate('matching(revision [, field])', safe=True)
1595 1596 def matching(repo, subset, x):
1596 1597 """Changesets in which a given set of fields match the set of fields in the
1597 1598 selected revision or set.
1598 1599
1599 1600 To match more than one field pass the list of fields to match separated
1600 1601 by spaces (e.g. ``author description``).
1601 1602
1602 1603 Valid fields are most regular revision fields and some special fields.
1603 1604
1604 1605 Regular revision fields are ``description``, ``author``, ``branch``,
1605 1606 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1606 1607 and ``diff``.
1607 1608 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1608 1609 contents of the revision. Two revisions matching their ``diff`` will
1609 1610 also match their ``files``.
1610 1611
1611 1612 Special fields are ``summary`` and ``metadata``:
1612 1613 ``summary`` matches the first line of the description.
1613 1614 ``metadata`` is equivalent to matching ``description user date``
1614 1615 (i.e. it matches the main metadata fields).
1615 1616
1616 1617 ``metadata`` is the default field which is used when no fields are
1617 1618 specified. You can match more than one field at a time.
1618 1619 """
1619 1620 # i18n: "matching" is a keyword
1620 1621 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1621 1622
1622 1623 revs = getset(repo, fullreposet(repo), l[0])
1623 1624
1624 1625 fieldlist = ['metadata']
1625 1626 if len(l) > 1:
1626 1627 fieldlist = getstring(l[1],
1627 1628 # i18n: "matching" is a keyword
1628 1629 _("matching requires a string "
1629 1630 "as its second argument")).split()
1630 1631
1631 1632 # Make sure that there are no repeated fields,
1632 1633 # expand the 'special' 'metadata' field type
1633 1634 # and check the 'files' whenever we check the 'diff'
1634 1635 fields = []
1635 1636 for field in fieldlist:
1636 1637 if field == 'metadata':
1637 1638 fields += ['user', 'description', 'date']
1638 1639 elif field == 'diff':
1639 1640 # a revision matching the diff must also match the files
1640 1641 # since matching the diff is very costly, make sure to
1641 1642 # also match the files first
1642 1643 fields += ['files', 'diff']
1643 1644 else:
1644 1645 if field == 'author':
1645 1646 field = 'user'
1646 1647 fields.append(field)
1647 1648 fields = set(fields)
1648 1649 if 'summary' in fields and 'description' in fields:
1649 1650 # If a revision matches its description it also matches its summary
1650 1651 fields.discard('summary')
1651 1652
1652 1653 # We may want to match more than one field
1653 1654 # Not all fields take the same amount of time to be matched
1654 1655 # Sort the selected fields in order of increasing matching cost
1655 1656 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1656 1657 'files', 'description', 'substate', 'diff']
1657 1658 def fieldkeyfunc(f):
1658 1659 try:
1659 1660 return fieldorder.index(f)
1660 1661 except ValueError:
1661 1662 # assume an unknown field is very costly
1662 1663 return len(fieldorder)
1663 1664 fields = list(fields)
1664 1665 fields.sort(key=fieldkeyfunc)
1665 1666
1666 1667 # Each field will be matched with its own "getfield" function
1667 1668 # which will be added to the getfieldfuncs array of functions
1668 1669 getfieldfuncs = []
1669 1670 _funcs = {
1670 1671 'user': lambda r: repo[r].user(),
1671 1672 'branch': lambda r: repo[r].branch(),
1672 1673 'date': lambda r: repo[r].date(),
1673 1674 'description': lambda r: repo[r].description(),
1674 1675 'files': lambda r: repo[r].files(),
1675 1676 'parents': lambda r: repo[r].parents(),
1676 1677 'phase': lambda r: repo[r].phase(),
1677 1678 'substate': lambda r: repo[r].substate,
1678 1679 'summary': lambda r: repo[r].description().splitlines()[0],
1679 1680 'diff': lambda r: list(repo[r].diff(git=True)),
1680 1681 }
1681 1682 for info in fields:
1682 1683 getfield = _funcs.get(info, None)
1683 1684 if getfield is None:
1684 1685 raise error.ParseError(
1685 1686 # i18n: "matching" is a keyword
1686 1687 _("unexpected field name passed to matching: %s") % info)
1687 1688 getfieldfuncs.append(getfield)
1688 1689 # convert the getfield array of functions into a "getinfo" function
1689 1690 # which returns an array of field values (or a single value if there
1690 1691 # is only one field to match)
1691 1692 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1692 1693
1693 1694 def matches(x):
1694 1695 for rev in revs:
1695 1696 target = getinfo(rev)
1696 1697 match = True
1697 1698 for n, f in enumerate(getfieldfuncs):
1698 1699 if target[n] != f(x):
1699 1700 match = False
1700 1701 if match:
1701 1702 return True
1702 1703 return False
1703 1704
1704 1705 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1705 1706
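# A hedged example (editor's addition):
#
#   matching(tip, "author date")
#
# selects changesets whose user and date both equal those of tip; note that
# fields are compared in increasing cost order, per fieldorder above.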
1706 1707 @predicate('reverse(set)', safe=True, takeorder=True)
1707 1708 def reverse(repo, subset, x, order):
1708 1709 """Reverse order of set.
1709 1710 """
1710 1711 l = getset(repo, subset, x)
1711 1712 if order == defineorder:
1712 1713 l.reverse()
1713 1714 return l
1714 1715
1715 1716 @predicate('roots(set)', safe=True)
1716 1717 def roots(repo, subset, x):
1717 1718 """Changesets in set with no parent changeset in set.
1718 1719 """
1719 1720 s = getset(repo, fullreposet(repo), x)
1720 1721 parents = repo.changelog.parentrevs
1721 1722 def filter(r):
1722 1723 for p in parents(r):
1723 1724 if 0 <= p and p in s:
1724 1725 return False
1725 1726 return True
1726 1727 return subset & s.filter(filter, condrepr='<roots>')
1727 1728
1728 1729 _sortkeyfuncs = {
1729 1730 'rev': lambda c: c.rev(),
1730 1731 'branch': lambda c: c.branch(),
1731 1732 'desc': lambda c: c.description(),
1732 1733 'user': lambda c: c.user(),
1733 1734 'author': lambda c: c.user(),
1734 1735 'date': lambda c: c.date()[0],
1735 1736 }
1736 1737
1737 1738 def _getsortargs(x):
1738 1739 """Parse sort options into (set, [(key, reverse)], opts)"""
1739 1740 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1740 1741 if 'set' not in args:
1741 1742 # i18n: "sort" is a keyword
1742 1743 raise error.ParseError(_('sort requires one or two arguments'))
1743 1744 keys = "rev"
1744 1745 if 'keys' in args:
1745 1746 # i18n: "sort" is a keyword
1746 1747 keys = getstring(args['keys'], _("sort spec must be a string"))
1747 1748
1748 1749 keyflags = []
1749 1750 for k in keys.split():
1750 1751 fk = k
1751 1752 reverse = (k[0] == '-')
1752 1753 if reverse:
1753 1754 k = k[1:]
1754 1755 if k not in _sortkeyfuncs and k != 'topo':
1755 1756 raise error.ParseError(_("unknown sort key %r") % fk)
1756 1757 keyflags.append((k, reverse))
1757 1758
1758 1759 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1759 1760 # i18n: "topo" is a keyword
1760 1761 raise error.ParseError(_('topo sort order cannot be combined '
1761 1762 'with other sort keys'))
1762 1763
1763 1764 opts = {}
1764 1765 if 'topo.firstbranch' in args:
1765 1766 if any(k == 'topo' for k, reverse in keyflags):
1766 1767 opts['topo.firstbranch'] = args['topo.firstbranch']
1767 1768 else:
1768 1769 # i18n: "topo" and "topo.firstbranch" are keywords
1769 1770 raise error.ParseError(_('topo.firstbranch can only be used '
1770 1771 'when using the topo sort key'))
1771 1772
1772 1773 return args['set'], keyflags, opts
1773 1774
1774 1775 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1775 1776 def sort(repo, subset, x, order):
1776 1777 """Sort set by keys. The default sort order is ascending, specify a key
1777 1778 as ``-key`` to sort in descending order.
1778 1779
1779 1780 The keys can be:
1780 1781
1781 1782 - ``rev`` for the revision number,
1782 1783 - ``branch`` for the branch name,
1783 1784 - ``desc`` for the commit message (description),
1784 1785 - ``user`` for user name (``author`` can be used as an alias),
1785 1786 - ``date`` for the commit date
1786 1787 - ``topo`` for a reverse topological sort
1787 1788
1788 1789 The ``topo`` sort order cannot be combined with other sort keys. This sort
1789 1790 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1790 1791 specifies what topological branches to prioritize in the sort.
1791 1792
1792 1793 """
1793 1794 s, keyflags, opts = _getsortargs(x)
1794 1795 revs = getset(repo, subset, s)
1795 1796
1796 1797 if not keyflags or order != defineorder:
1797 1798 return revs
1798 1799 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1799 1800 revs.sort(reverse=keyflags[0][1])
1800 1801 return revs
1801 1802 elif keyflags[0][0] == "topo":
1802 1803 firstbranch = ()
1803 1804 if 'topo.firstbranch' in opts:
1804 1805 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1805 1806 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1806 1807 istopo=True)
1807 1808 if keyflags[0][1]:
1808 1809 revs.reverse()
1809 1810 return revs
1810 1811
1811 1812 # sort() is guaranteed to be stable
1812 1813 ctxs = [repo[r] for r in revs]
1813 1814 for k, reverse in reversed(keyflags):
1814 1815 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1815 1816 return baseset([c.rev() for c in ctxs])
1816 1817
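# Hedged examples (editor's addition; "default" names a real branch here):
#
#   sort(branch(default), "-date user")
#   sort(all(), topo, topo.firstbranch=default)
#
# The first sorts by descending date, breaking ties by ascending user (the
# stable sort is applied from the last key to the first); the second emits
# a topological order seeded so the branch of "default" is drawn first.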
1817 1818 def _toposort(revs, parentsfunc, firstbranch=()):
1818 1819 """Yield revisions from heads to roots one (topo) branch at a time.
1819 1820
1820 1821 This function aims to be used by a graph generator that wishes to minimize
1821 1822 the number of parallel branches and their interleaving.
1822 1823
1823 1824 Example iteration order (numbers show the "true" order in a changelog):
1824 1825
1825 1826 o 4
1826 1827 |
1827 1828 o 1
1828 1829 |
1829 1830 | o 3
1830 1831 | |
1831 1832 | o 2
1832 1833 |/
1833 1834 o 0
1834 1835
1835 1836 Note that the ancestors of merges are understood by the current
1836 1837 algorithm to be on the same branch. This means no reordering will
1837 1838 occur behind a merge.
1838 1839 """
1839 1840
1840 1841 ### Quick summary of the algorithm
1841 1842 #
1842 1843 # This function is based around a "retention" principle. We keep revisions
1843 1844 # in memory until we are ready to emit a whole branch that immediately
1844 1845 # "merges" into an existing one. This reduces the number of parallel
1845 1846 # branches with interleaved revisions.
1846 1847 #
1847 1848 # During iteration revs are split into two groups:
1848 1849 # A) revisions already emitted
1849 1850 # B) revisions in "retention". They are stored as different subgroups.
1850 1851 #
1851 1852 # for each REV, we do the following logic:
1852 1853 #
1853 1854 # 1) if REV is a parent of (A), we will emit it. If there is a
1854 1855 # retention group ((B) above) that is blocked on REV being
1855 1856 # available, we emit all the revisions out of that retention
1856 1857 # group first.
1857 1858 #
1858 1859 # 2) else, we search for a subgroup in (B) waiting for REV to be
1859 1860 # available; if such a subgroup exists, we add REV to it and the
1860 1861 # subgroup now waits for REV.parents() to be available.
1861 1862 #
1862 1863 # 3) finally, if no such subgroup exists in (B), we create a new one.
1863 1864 #
1864 1865 #
1865 1866 # To bootstrap the algorithm, we emit the tipmost revision (which
1866 1867 # puts it in group (A) from above).
1867 1868
1868 1869 revs.sort(reverse=True)
1869 1870
1870 1871 # Set of parents of revisions that have been emitted. They can be considered
1871 1872 # unblocked as the graph generator is already aware of them so there is no
1872 1873 # need to delay the revisions that reference them.
1873 1874 #
1874 1875 # If someone wants to prioritize a branch over the others, pre-filling this
1875 1876 # set will force all other branches to wait until this branch is ready to be
1876 1877 # emitted.
1877 1878 unblocked = set(firstbranch)
1878 1879
1879 1880 # list of groups waiting to be displayed, each group is defined by:
1880 1881 #
1881 1882 # (revs: list of revs waiting to be displayed,
1882 1883 # blocked: set of revs that cannot be displayed before those in 'revs')
1883 1884 #
1884 1885 # The second value ('blocked') corresponds to parents of any revision in the
1885 1886 # group ('revs') that is not itself contained in the group. The main idea
1886 1887 # of this algorithm is to delay as much as possible the emission of any
1887 1888 # revision. This means waiting for the moment we are about to display
1888 1889 # these parents to display the revs in a group.
1889 1890 #
1890 1891 # This first implementation is smart until it encounters a merge: it will
1891 1892 # emit revs as soon as any parent is about to be emitted and can grow an
1892 1893 # arbitrary number of revs in 'blocked'. In practice this means we properly
1893 1894 # retain new branches but give up on any special ordering for ancestors
1894 1895 # of merges. The implementation can be improved to handle this better.
1895 1896 #
1896 1897 # The first subgroup is special. It corresponds to all the revisions that
1897 1898 # were already emitted. The 'revs' list is expected to be empty and the
1898 1899 # 'blocked' set contains the parent revisions of already emitted revisions.
1899 1900 #
1900 1901 # You could pre-seed the <parents> set of groups[0] with specific
1901 1902 # changesets to select what the first emitted branch should be.
1902 1903 groups = [([], unblocked)]
1903 1904 pendingheap = []
1904 1905 pendingset = set()
1905 1906
1906 1907 heapq.heapify(pendingheap)
1907 1908 heappop = heapq.heappop
1908 1909 heappush = heapq.heappush
1909 1910 for currentrev in revs:
1910 1911 # The heap pops its smallest element; we want the highest, so we invert
1911 1912 if currentrev not in pendingset:
1912 1913 heappush(pendingheap, -currentrev)
1913 1914 pendingset.add(currentrev)
1914 1915 # iterate on pending revs until the current rev has been
1915 1916 # processed.
1916 1917 rev = None
1917 1918 while rev != currentrev:
1918 1919 rev = -heappop(pendingheap)
1919 1920 pendingset.remove(rev)
1920 1921
1921 1922 # Look for a subgroup that is blocked, waiting for the current revision.
1922 1923 matching = [i for i, g in enumerate(groups) if rev in g[1]]
1923 1924
1924 1925 if matching:
1925 1926 # The main idea is to gather together all sets that are blocked
1926 1927 # on the same revision.
1927 1928 #
1928 1929 # Groups are merged when a common blocking ancestor is
1929 1930 # observed. For example, given two groups:
1930 1931 #
1931 1932 # revs [5, 4] waiting for 1
1932 1933 # revs [3, 2] waiting for 1
1933 1934 #
1934 1935 # These two groups will be merged when we process
1935 1936 # 1. In theory, we could have merged the groups when
1936 1937 # we added 2 to the group it is now in (we could have
1937 1938 # noticed the groups were both blocked on 1 then), but
1938 1939 # the way it works now makes the algorithm simpler.
1939 1940 #
1940 1941 # We also always keep the oldest subgroup first. We can
1941 1942 # probably improve the behavior by having the longest set
1942 1943 # first. That way, graph algorithms could minimise the length
1943 1944 # of parallel lines in their drawing. This is currently not done.
1944 1945 targetidx = matching.pop(0)
1945 1946 trevs, tparents = groups[targetidx]
1946 1947 for i in matching:
1947 1948 gr = groups[i]
1948 1949 trevs.extend(gr[0])
1949 1950 tparents |= gr[1]
1950 1951 # delete all merged subgroups (except the one we kept)
1951 1952 # (starting from the last subgroup for performance and
1952 1953 # sanity reasons)
1953 1954 for i in reversed(matching):
1954 1955 del groups[i]
1955 1956 else:
1956 1957 # This is a new head. We create a new subgroup for it.
1957 1958 targetidx = len(groups)
1958 1959 groups.append(([], set([rev])))
1959 1960
1960 1961 gr = groups[targetidx]
1961 1962
1962 1963 # We now add the current nodes to this subgroup. This is done
1963 1964 # after the subgroup merging because all elements from a subgroup
1964 1965 # that relied on this rev must precede it.
1965 1966 #
1966 1967 # we also update the <parents> set to include the parents of the
1967 1968 # new nodes.
1968 1969 if rev == currentrev: # only display stuff in rev
1969 1970 gr[0].append(rev)
1970 1971 gr[1].remove(rev)
1971 1972 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
1972 1973 gr[1].update(parents)
1973 1974 for p in parents:
1974 1975 if p not in pendingset:
1975 1976 pendingset.add(p)
1976 1977 heappush(pendingheap, -p)
1977 1978
1978 1979 # Look for a subgroup to display
1979 1980 #
1980 1981 # When unblocked is empty (if clause), we were not waiting for any
1981 1982 # revisions during the first iteration (if no priority was given) or
1982 1983 # if we emitted a whole disconnected set of the graph (reached a
1983 1984 # root). In that case we arbitrarily take the oldest known
1984 1985 # subgroup. The heuristic could probably be better.
1985 1986 #
1986 1987 # Otherwise (elif clause) if the subgroup is blocked on
1987 1988 # a revision we just emitted, we can safely emit it as
1988 1989 # well.
1989 1990 if not unblocked:
1990 1991 if len(groups) > 1: # display other subset
1991 1992 targetidx = 1
1992 1993 gr = groups[1]
1993 1994 elif not gr[1] & unblocked:
1994 1995 gr = None
1995 1996
1996 1997 if gr is not None:
1997 1998 # update the set of awaited revisions with the one from the
1998 1999 # subgroup
1999 2000 unblocked |= gr[1]
2000 2001 # output all revisions in the subgroup
2001 2002 for r in gr[0]:
2002 2003 yield r
2003 2004 # delete the subgroup that you just output
2004 2005 # unless it is groups[0], in which case you just empty it.
2005 2006 if targetidx:
2006 2007 del groups[targetidx]
2007 2008 else:
2008 2009 gr[0][:] = []
2009 2010 # Finally, flush any subgroups still waiting for revisions we are not
2010 2011 # going to iterate over.
2011 2012 for g in groups:
2012 2013 for r in g[0]:
2013 2014 yield r
2014 2015
2015 2016 @predicate('subrepo([pattern])')
2016 2017 def subrepo(repo, subset, x):
2017 2018 """Changesets that add, modify or remove the given subrepo. If no subrepo
2018 2019 pattern is named, any subrepo changes are returned.
2019 2020 """
2020 2021 # i18n: "subrepo" is a keyword
2021 2022 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2022 2023 pat = None
2023 2024 if len(args) != 0:
2024 2025 pat = getstring(args[0], _("subrepo requires a pattern"))
2025 2026
2026 2027 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2027 2028
2028 2029 def submatches(names):
2029 2030 k, p, m = util.stringmatcher(pat)
2030 2031 for name in names:
2031 2032 if m(name):
2032 2033 yield name
2033 2034
2034 2035 def matches(x):
2035 2036 c = repo[x]
2036 2037 s = repo.status(c.p1().node(), c.node(), match=m)
2037 2038
2038 2039 if pat is None:
2039 2040 return s.added or s.modified or s.removed
2040 2041
2041 2042 if s.added:
2042 2043 return any(submatches(c.substate.keys()))
2043 2044
2044 2045 if s.modified:
2045 2046 subs = set(c.p1().substate.keys())
2046 2047 subs.update(c.substate.keys())
2047 2048
2048 2049 for path in submatches(subs):
2049 2050 if c.p1().substate.get(path) != c.substate.get(path):
2050 2051 return True
2051 2052
2052 2053 if s.removed:
2053 2054 return any(submatches(c.p1().substate.keys()))
2054 2055
2055 2056 return False
2056 2057
2057 2058 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2058 2059
2059 2060 def _substringmatcher(pattern, casesensitive=True):
2060 2061 kind, pattern, matcher = util.stringmatcher(pattern,
2061 2062 casesensitive=casesensitive)
2062 2063 if kind == 'literal':
2063 2064 if not casesensitive:
2064 2065 pattern = encoding.lower(pattern)
2065 2066 matcher = lambda s: pattern in encoding.lower(s)
2066 2067 else:
2067 2068 matcher = lambda s: pattern in s
2068 2069 return kind, pattern, matcher
2069 2070
2070 2071 @predicate('tag([name])', safe=True)
2071 2072 def tag(repo, subset, x):
2072 2073 """The specified tag by name, or all tagged revisions if no name is given.
2073 2074
2074 2075 Pattern matching is supported for `name`. See
2075 2076 :hg:`help revisions.patterns`.
2076 2077 """
2077 2078 # i18n: "tag" is a keyword
2078 2079 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2079 2080 cl = repo.changelog
2080 2081 if args:
2081 2082 pattern = getstring(args[0],
2082 2083 # i18n: "tag" is a keyword
2083 2084 _('the argument to tag must be a string'))
2084 2085 kind, pattern, matcher = util.stringmatcher(pattern)
2085 2086 if kind == 'literal':
2086 2087 # avoid resolving all tags
2087 2088 tn = repo._tagscache.tags.get(pattern, None)
2088 2089 if tn is None:
2089 2090 raise error.RepoLookupError(_("tag '%s' does not exist")
2090 2091 % pattern)
2091 2092 s = set([repo[tn].rev()])
2092 2093 else:
2093 2094 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2094 2095 else:
2095 2096 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2096 2097 return subset & s
2097 2098
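# A hedged example (editor's addition; the pattern is hypothetical):
#
#   tag("re:^v1\.")
#
# selects revisions carrying a tag matching the regular expression, while a
# bare tag() selects every tagged revision (the special 'tip' name is
# skipped, as the code above shows).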
2098 2099 @predicate('tagged', safe=True)
2099 2100 def tagged(repo, subset, x):
2100 2101 return tag(repo, subset, x)
2101 2102
2102 2103 @predicate('unstable()', safe=True)
2103 2104 def unstable(repo, subset, x):
2104 2105 """Non-obsolete changesets with obsolete ancestors.
2105 2106 """
2106 2107 # i18n: "unstable" is a keyword
2107 2108 getargs(x, 0, 0, _("unstable takes no arguments"))
2108 2109 unstables = obsmod.getrevs(repo, 'unstable')
2109 2110 return subset & unstables
2110 2111
2111 2112
2112 2113 @predicate('user(string)', safe=True)
2113 2114 def user(repo, subset, x):
2114 2115 """User name contains string. The match is case-insensitive.
2115 2116
2116 2117 Pattern matching is supported for `string`. See
2117 2118 :hg:`help revisions.patterns`.
2118 2119 """
2119 2120 return author(repo, subset, x)
2120 2121
2121 2122 @predicate('wdir', safe=True)
2122 2123 def wdir(repo, subset, x):
2123 2124 """Working directory. (EXPERIMENTAL)"""
2124 2125 # i18n: "wdir" is a keyword
2125 2126 getargs(x, 0, 0, _("wdir takes no arguments"))
2126 2127 if node.wdirrev in subset or isinstance(subset, fullreposet):
2127 2128 return baseset([node.wdirrev])
2128 2129 return baseset()
2129 2130
2130 2131 def _orderedlist(repo, subset, x):
2131 2132 s = getstring(x, "internal error")
2132 2133 if not s:
2133 2134 return baseset()
2134 2135 # Remove duplicates here; it's difficult for the caller to deduplicate sets
2135 2136 # because different symbols can point to the same rev.
2136 2137 cl = repo.changelog
2137 2138 ls = []
2138 2139 seen = set()
2139 2140 for t in s.split('\0'):
2140 2141 try:
2141 2142 # fast path for integer revision
2142 2143 r = int(t)
2143 2144 if str(r) != t or r not in cl:
2144 2145 raise ValueError
2145 2146 revs = [r]
2146 2147 except ValueError:
2147 2148 revs = stringset(repo, subset, t)
2148 2149
2149 2150 for r in revs:
2150 2151 if r in seen:
2151 2152 continue
2152 2153 if (r in subset
2153 2154 or r == node.nullrev and isinstance(subset, fullreposet)):
2154 2155 ls.append(r)
2155 2156 seen.add(r)
2156 2157 return baseset(ls)
2157 2158
2158 2159 # for internal use
2159 2160 @predicate('_list', safe=True, takeorder=True)
2160 2161 def _list(repo, subset, x, order):
2161 2162 if order == followorder:
2162 2163 # slow path to take the subset order
2163 2164 return subset & _orderedlist(repo, fullreposet(repo), x)
2164 2165 else:
2165 2166 return _orderedlist(repo, subset, x)
2166 2167
2167 2168 def _orderedintlist(repo, subset, x):
2168 2169 s = getstring(x, "internal error")
2169 2170 if not s:
2170 2171 return baseset()
2171 2172 ls = [int(r) for r in s.split('\0')]
2172 2173 s = subset
2173 2174 return baseset([r for r in ls if r in s])
2174 2175
2175 2176 # for internal use
2176 2177 @predicate('_intlist', safe=True, takeorder=True)
2177 2178 def _intlist(repo, subset, x, order):
2178 2179 if order == followorder:
2179 2180 # slow path to take the subset order
2180 2181 return subset & _orderedintlist(repo, fullreposet(repo), x)
2181 2182 else:
2182 2183 return _orderedintlist(repo, subset, x)
2183 2184
2184 2185 def _orderedhexlist(repo, subset, x):
2185 2186 s = getstring(x, "internal error")
2186 2187 if not s:
2187 2188 return baseset()
2188 2189 cl = repo.changelog
2189 2190 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2190 2191 s = subset
2191 2192 return baseset([r for r in ls if r in s])
2192 2193
2193 2194 # for internal use
2194 2195 @predicate('_hexlist', safe=True, takeorder=True)
2195 2196 def _hexlist(repo, subset, x, order):
2196 2197 if order == followorder:
2197 2198 # slow path to take the subset order
2198 2199 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2199 2200 else:
2200 2201 return _orderedhexlist(repo, subset, x)
2201 2202
2202 2203 methods = {
2203 2204 "range": rangeset,
2204 2205 "rangeall": rangeall,
2205 2206 "rangepre": rangepre,
2206 2207 "rangepost": rangepost,
2207 2208 "dagrange": dagrange,
2208 2209 "string": stringset,
2209 2210 "symbol": stringset,
2210 2211 "and": andset,
2211 2212 "or": orset,
2212 2213 "not": notset,
2213 2214 "difference": differenceset,
2214 2215 "list": listset,
2215 2216 "keyvalue": keyvaluepair,
2216 2217 "func": func,
2217 2218 "ancestor": ancestorspec,
2218 2219 "parent": parentspec,
2219 2220 "parentpost": parentpost,
2220 2221 }
2221 2222
2222 2223 def posttreebuilthook(tree, repo):
2223 2224 # hook for extensions to execute code on the optimized tree
2224 2225 pass
2225 2226
2226 2227 def match(ui, spec, repo=None, order=defineorder):
2227 2228 """Create a matcher for a single revision spec
2228 2229
2229 2230 If order=followorder, a matcher takes the ordering specified by the input
2230 2231 set.
2231 2232 """
2232 2233 return matchany(ui, [spec], repo=repo, order=order)
2233 2234
2234 2235 def matchany(ui, specs, repo=None, order=defineorder):
2235 2236 """Create a matcher that will include any revisions matching one of the
2236 2237 given specs
2237 2238
2238 2239 If order=followorder, a matcher takes the ordering specified by the input
2239 2240 set.
2240 2241 """
2241 2242 if not specs:
2242 2243 def mfunc(repo, subset=None):
2243 2244 return baseset()
2244 2245 return mfunc
2245 2246 if not all(specs):
2246 2247 raise error.ParseError(_("empty query"))
2247 2248 lookup = None
2248 2249 if repo:
2249 2250 lookup = repo.__contains__
2250 2251 if len(specs) == 1:
2251 2252 tree = revsetlang.parse(specs[0], lookup)
2252 2253 else:
2253 2254 tree = ('or',
2254 2255 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2255 2256
2256 2257 if ui:
2257 2258 tree = revsetlang.expandaliases(ui, tree)
2258 2259 tree = revsetlang.foldconcat(tree)
2259 2260 tree = revsetlang.analyze(tree, order)
2260 2261 tree = revsetlang.optimize(tree)
2261 2262 posttreebuilthook(tree, repo)
2262 2263 return makematcher(tree)
2263 2264
2264 2265 def makematcher(tree):
2265 2266 """Create a matcher from an evaluatable tree"""
2266 2267 def mfunc(repo, subset=None):
2267 2268 if subset is None:
2268 2269 subset = fullreposet(repo)
2269 2270 if util.safehasattr(subset, 'isascending'):
2270 2271 result = getset(repo, subset, tree)
2271 2272 else:
2272 2273 result = getset(repo, baseset(subset), tree)
2273 2274 return result
2274 2275 return mfunc
2275 2276
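# A hedged usage sketch (editor's addition): how an embedder might drive the
# matcher API above. "repo" is assumed to be an existing localrepository and
# the spec is hypothetical.
#
#   from mercurial import revset
#   m = revset.match(repo.ui, 'limit(heads(all()), 2)', repo=repo)
#   for r in m(repo):      # subset defaults to fullreposet(repo)
#       print repo[r]      # each r is a revision number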
2276 2277 def loadpredicate(ui, extname, registrarobj):
2277 2278 """Load revset predicates from specified registrarobj
2278 2279 """
2279 2280 for name, func in registrarobj._table.iteritems():
2280 2281 symbols[name] = func
2281 2282 if func._safe:
2282 2283 safesymbols.add(name)
2283 2284
2284 2285 # load built-in predicates explicitly to setup safesymbols
2285 2286 loadpredicate(None, None, predicate)
2286 2287
2287 2288 # tell hggettext to extract docstrings from these functions:
2288 2289 i18nfunctions = symbols.values()