##// END OF EJS Templates
dagop: move blockancestors() and blockdescendants() from context...
Yuya Nishihara -
r32904:582080a4 default
parent child Browse files
Show More
@@ -1,2389 +1,2306
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 subrepo,
42 42 util,
43 43 )
44 44
45 45 propertycache = util.propertycache
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context as changeid returns it unchanged, so
        # basectx(repo, ctx) is a cheap no-op conversion
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        # defaults; subclasses overwrite these in __init__
        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            # short() yields bytes; str() must return unicode on Python 3
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts of different subclasses never compare equal, even for
        # the same revision
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership test: is the path tracked in this revision's manifest?
        return key in self._manifest

    def __getitem__(self, key):
        # indexing by path yields the corresponding file context
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # only emitted when listclean is set
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed subrepo state for this revision (lazily computed, cached)
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """return the recorded revision id of the given subrepo path"""
        return self.substate[subpath][1]

    # -- trivial accessors ------------------------------------------------
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        """True if this changeset's phase permits rewriting it"""
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # no second parent: report the null revision as p2
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path in this revision

        Raises ManifestLookupError if the file is absent. Uses whichever of
        the cached full manifest or the manifest delta is already loaded
        before falling back to a targeted manifest lookup."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # a missing file has no flags
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher for the given patterns, rooted at the repo root"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # fold the subrepo's results into ours, prefixing paths
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
387 387
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    if filtername.startswith('visible'):
        # the default "visible" filters hide obsolete changesets; suggest
        # the escape hatch explicitly
        return error.FilteredRepoLookupError(
            _("hidden revision '%s'") % changeid,
            hint=_('use --hidden to access hidden revisions'))
    template = _("filtered revision '%s' (not in '%s' subset)")
    return error.FilteredRepoLookupError(template % (changeid, filtername))
400 400
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The cascade below tries progressively more expensive/ambiguous
        # interpretations of changeid. Filtered-repo exceptions are re-raised
        # so they reach the outer handler and get a dedicated error message.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # try a decimal revision number (possibly negative = from tip)
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # looks like a full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unambiguous hex prefix of a node id
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message printable if changeid was binary
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # parsed changelog entry for this revision (lazily computed, cached)
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # manifest delta against the parent; cheaper than a full read
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            # a null p2 is omitted so len(parents) distinguishes merges
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changeset fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    # -- accessors delegating to the parsed changelog entry ---------------
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """generate contexts for all ancestors of this changeset"""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """generate contexts for all descendants of this changeset"""
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog's choice
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
661 661
662 662 class basefilectx(object):
663 663 """A filecontext object represents the common logic for its children:
664 664 filectx: read-only access to a filerevision that is already present
665 665 in the repo,
666 666 workingfilectx: a filecontext that represents files from the working
667 667 directory,
668 668 memfilectx: a filecontext that represents files in-memory,
669 669 overlayfilectx: duplicate another filecontext with some fields overridden.
670 670 """
    @propertycache
    def _filelog(self):
        """the filelog (file revision store) for this file's path, cached"""
        return self._repo.file(self._path)
674 674
    @propertycache
    def _changeid(self):
        """the changelog revision this file context is associated with

        Prefers an explicitly assigned id or an attached changectx; falls
        back to (lazily adjusted) linkrev resolution."""
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
687 687
    @propertycache
    def _filenode(self):
        """the file revision node id, resolved from the fileid or the
        associated changectx"""
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
694 694
    @propertycache
    def _filerev(self):
        """the file revision number within the filelog"""
        return self._filelog.rev(self._filenode)
698 698
    @propertycache
    def _repopath(self):
        """the repository-relative path of this file"""
        return self._path
702 702
    def __nonzero__(self):
        # truthiness means "the file revision exists"; probing _filenode
        # raises LookupError when the file is absent
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__
712 712
713 713 def __str__(self):
714 714 try:
715 715 return "%s@%s" % (self.path(), self._changectx)
716 716 except error.LookupError:
717 717 return "%s@???" % self.path()
718 718
719 719 def __repr__(self):
720 720 return "<%s %s>" % (type(self).__name__, str(self))
721 721
    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not fully initialized: fall back to identity hashing
            return id(self)

    def __eq__(self, other):
        # equal iff same subclass, same path and same file revision node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
737 737
    # -- trivial accessors, mostly delegating to the associated changectx --
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw linkrev from the filelog; may be an alias, see introrev()
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path
786 786
    def isbinary(self):
        """True if the file content looks binary (best effort: an IOError
        while reading is treated as "not binary")"""
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
803 803
    # set to True by subclasses that implement their own comparison; cmp()
    # then defers to the other side
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # only do a (potentially expensive) content comparison when the
        # sizes could plausibly match; otherwise report "different"
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
822 822
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
868 868
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # without an associated changeset, the raw linkrev is all we have
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
884 884
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
900 900
    def parents(self):
        """return parent filectxs, following renames when present"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
920 920
921 921 def p1(self):
922 922 return self.parents()[0]
923 923
924 924 def p2(self):
925 925 p = self.parents()
926 926 if len(p) == 2:
927 927 return p[1]
928 928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
929 929
930 930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
931 931 diffopts=None):
932 932 '''returns a list of tuples of ((ctx, number), line) for each line
933 933 in the file, where ctx is the filectx of the node where
934 934 that line was last changed; if linenumber parameter is true, number is
935 935 the line number at the first appearance in the managed file, otherwise,
936 936 number has a fixed value of False.
937 937 '''
938 938
939 939 def lines(text):
940 940 if text.endswith("\n"):
941 941 return text.count("\n")
942 942 return text.count("\n") + int(bool(text))
943 943
944 944 if linenumber:
945 945 def decorate(text, rev):
946 946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
947 947 else:
948 948 def decorate(text, rev):
949 949 return ([(rev, False)] * lines(text), text)
950 950
951 951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952 952
953 953 def parents(f):
954 954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 957 # isn't an ancestor of the srcrev.
958 958 f._changeid
959 959 pl = f.parents()
960 960
961 961 # Don't return renamed parents if we aren't following.
962 962 if not follow:
963 963 pl = [p for p in pl if p.path() == f.path()]
964 964
965 965 # renamed filectx won't have a filelog yet, so set it
966 966 # from the cache to save time
967 967 for p in pl:
968 968 if not '_filelog' in p.__dict__:
969 969 p._filelog = getlog(p.path())
970 970
971 971 return pl
972 972
973 973 # use linkrev to find the first changeset where self appeared
974 974 base = self
975 975 introrev = self.introrev()
976 976 if self.rev() != introrev:
977 977 base = self.filectx(self.filenode(), changeid=introrev)
978 978 if getattr(base, '_ancestrycontext', None) is None:
979 979 cl = self._repo.changelog
980 980 if introrev is None:
981 981 # wctx is not inclusive, but works because _ancestrycontext
982 982 # is used to test filelog revisions
983 983 ac = cl.ancestors([p.rev() for p in base.parents()],
984 984 inclusive=True)
985 985 else:
986 986 ac = cl.ancestors([introrev], inclusive=True)
987 987 base._ancestrycontext = ac
988 988
989 989 # This algorithm would prefer to be recursive, but Python is a
990 990 # bit recursion-hostile. Instead we do an iterative
991 991 # depth-first search.
992 992
993 993 # 1st DFS pre-calculates pcache and needed
994 994 visit = [base]
995 995 pcache = {}
996 996 needed = {base: 1}
997 997 while visit:
998 998 f = visit.pop()
999 999 if f in pcache:
1000 1000 continue
1001 1001 pl = parents(f)
1002 1002 pcache[f] = pl
1003 1003 for p in pl:
1004 1004 needed[p] = needed.get(p, 0) + 1
1005 1005 if p not in pcache:
1006 1006 visit.append(p)
1007 1007
1008 1008 # 2nd DFS does the actual annotate
1009 1009 visit[:] = [base]
1010 1010 hist = {}
1011 1011 while visit:
1012 1012 f = visit[-1]
1013 1013 if f in hist:
1014 1014 visit.pop()
1015 1015 continue
1016 1016
1017 1017 ready = True
1018 1018 pl = pcache[f]
1019 1019 for p in pl:
1020 1020 if p not in hist:
1021 1021 ready = False
1022 1022 visit.append(p)
1023 1023 if ready:
1024 1024 visit.pop()
1025 1025 curr = decorate(f.data(), f)
1026 1026 skipchild = False
1027 1027 if skiprevs is not None:
1028 1028 skipchild = f._changeid in skiprevs
1029 1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1030 1030 diffopts)
1031 1031 for p in pl:
1032 1032 if needed[p] == 1:
1033 1033 del hist[p]
1034 1034 del needed[p]
1035 1035 else:
1036 1036 needed[p] -= 1
1037 1037
1038 1038 hist[f] = curr
1039 1039 del pcache[f]
1040 1040
1041 1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1042 1042
1043 1043 def ancestors(self, followfirst=False):
1044 1044 visit = {}
1045 1045 c = self
1046 1046 if followfirst:
1047 1047 cut = 1
1048 1048 else:
1049 1049 cut = None
1050 1050
1051 1051 while True:
1052 1052 for parent in c.parents()[:cut]:
1053 1053 visit[(parent.linkrev(), parent.filenode())] = parent
1054 1054 if not visit:
1055 1055 break
1056 1056 c = visit.pop(max(visit))
1057 1057 yield c
1058 1058
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # each entry pairs a parent's annotate data with the diff blocks
    # between that parent's text and the child's text
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        # diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1168 1168
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be supplied;
        filelog may be passed to avoid reopening it.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only set the attributes we were actually given; the rest are
        # lazily computed by propertycache on basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """revision data with revlog flags left unprocessed"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """file content; a censored node aborts unless censor.policy is
        'ignore', in which case it reads as empty"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1274 1274
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # '!' marks a changed block; any such block inside the range means
    # the range was touched by this diff
    changed = False
    for _block, stype in inrangeblocks:
        if stype == '!':
            changed = True
            break
    return changed, linerange1
1284
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each generated value is a `(filectx, (fromline, toline))` pair giving
    the revision that touched the block and the block's line range in
    that revision.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        # rebase fctx onto the changeset that actually introduced it
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process pending contexts highest (linkrev, filenode) first
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
1319
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Values are `(filectx, (fromline, toline))` pairs, in filelog
    revision order.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # seen maps a filelog revision to its context and the block's line
    # range in that revision
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
1357
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # only set attributes explicitly provided; the rest fall back to
        # the propertycache defaults below
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + r"+"

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default when no explicit 'changes' was given to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        """uncommitted contexts have no recorded subrepo revision"""
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits bookmarks from all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return flags ('l', 'x' or '') for *path*, preferring the
        cached manifest when one has been built."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then the rest of the changelog ancestry
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1555 1472
1556 1473 class workingctx(committablectx):
1557 1474 """A workingctx object makes access to data related to
1558 1475 the current working directory convenient.
1559 1476 date - any valid date string or (unixtime, offset), or None.
1560 1477 user - username string, or None.
1561 1478 extra - a dictionary of extra values, or None.
1562 1479 changes - a list of file lists as returned by localrepo.status()
1563 1480 or None to use the repository status.
1564 1481 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # workingctx adds no construction logic of its own
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1568 1485
1569 1486 def __iter__(self):
1570 1487 d = self._repo.dirstate
1571 1488 for f in d:
1572 1489 if d[f] != 'r':
1573 1490 yield f
1574 1491
1575 1492 def __contains__(self, key):
1576 1493 return self._repo.dirstate[key] not in "?r"
1577 1494
    def hex(self):
        """hex form of the working-directory pseudo-node"""
        return hex(wdirid)
1580 1497
1581 1498 @propertycache
1582 1499 def _parents(self):
1583 1500 p = self._repo.dirstate.parents()
1584 1501 if p[1] == nullid:
1585 1502 p = p[:-1]
1586 1503 return [changectx(self._repo, x) for x in p]
1587 1504
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1592 1509
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # NOTE: returns a truthy object (not necessarily True) when dirty
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1604 1521
    def add(self, list, prefix=""):
        """Schedule the given files for tracking; return the rejected ones.

        *prefix* is only used to build user-facing paths in warnings.
        """
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn but still add: large files are expensive to diff
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1636 1553
1637 1554 def forget(self, files, prefix=""):
1638 1555 join = lambda f: os.path.join(prefix, f)
1639 1556 with self._repo.wlock():
1640 1557 rejected = []
1641 1558 for f in files:
1642 1559 if f not in self._repo.dirstate:
1643 1560 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1644 1561 rejected.append(f)
1645 1562 elif self._repo.dirstate[f] != 'a':
1646 1563 self._repo.dirstate.remove(f)
1647 1564 else:
1648 1565 self._repo.dirstate.drop(f)
1649 1566 return rejected
1650 1567
1651 1568 def undelete(self, list):
1652 1569 pctxs = self.parents()
1653 1570 with self._repo.wlock():
1654 1571 for f in list:
1655 1572 if self._repo.dirstate[f] != 'r':
1656 1573 self._repo.ui.warn(_("%s not removed!\n") % f)
1657 1574 else:
1658 1575 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1659 1576 t = fctx.data()
1660 1577 self._repo.wwrite(f, t, fctx.flags())
1661 1578 self._repo.dirstate.normal(f)
1662 1579
    def copy(self, source, dest):
        """Record *dest* as copied from *source* in the dirstate.

        Warns and does nothing if *dest* does not exist or is not a
        regular file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                # make sure dest is tracked before recording the copy
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1681 1598
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a file matcher for this working context."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1693 1610
    def _filtersuspectsymlink(self, files):
        """Drop files whose 'l' flag looks bogus; no-op when the
        filesystem supports symlinks."""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1712 1629
    def _checklookup(self, files):
        """Classify possibly-clean *files* by full content compare.

        Returns `(modified, deleted, fixup)` where `fixup` lists files
        that turned out to be clean and only need a dirstate refresh.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1742 1659
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # best-effort: a contended wlock just skips the fixup
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1782 1699
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and listclean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1816 1733
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        # computed lazily and cached by @propertycache
        return self._buildstatusmanifest(self._status)
1827 1744
1828 1745 def _buildstatusmanifest(self, status):
1829 1746 """Builds a manifest that includes the given status results."""
1830 1747 parents = self.parents()
1831 1748
1832 1749 man = parents[0].manifest().copy()
1833 1750
1834 1751 ff = self._flagfunc
1835 1752 for i, l in ((addednodeid, status.added),
1836 1753 (modifiednodeid, status.modified)):
1837 1754 for f in l:
1838 1755 man[f] = i
1839 1756 try:
1840 1757 man.setflag(f, ff(f))
1841 1758 except OSError:
1842 1759 pass
1843 1760
1844 1761 for f in status.deleted + status.removed:
1845 1762 if f in man:
1846 1763 del man[f]
1847 1764
1848 1765 return man
1849 1766
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # not comparing against the parent: fall back to the generic,
            # manifest-based comparison in the superclass
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1869 1786
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # mutate the match object in place so callers see the filter
            match.bad = bad
        return match
1891 1808
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not committed yet: no changelog revision is associated
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "file absent in this context"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # a renamed file's first parent is its copy source; the source's
            # filelog is unknown here, hence the trailing None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop null nodes: the file does not exist in those parents
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no known children
        return []
1938 1855
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh working-directory context when none was given
        return workingctx(self._repo)

    def data(self):
        # read the file content from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        """return (source path, source filenode) if this file was copied in
        the working directory, or None"""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            # prefer the on-disk mtime, keeping the context's timezone
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1984 1901
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # note: super() of workingctx, not workingcommitctx — resolution
        # starts after workingctx in the MRO so the precomputed 'changes'
        # are passed straight through
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # clean = everything in the manifest this commit doesn't touch
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2022 1939
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # cache keyed on path only; repo/memctx are assumed constant
        try:
            return cache[path]
        except KeyError:
            result = func(repo, memctx, path)
            cache[path] = result
            return result

    return getfilectx
2038 1955
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a (path, node) pair; only the source path is
        # kept here (why not only store that instead of a tuple?)
        renameinfo = fctx.renamed()
        copysource = renameinfo[0] if renameinfo else renameinfo
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2057 1974
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        filedata, filemode, copysource = patchstore.getfile(path)
        if filedata is None:
            # the patch removes this file
            return None
        islink, isexec = filemode
        return memfilectx(repo, path, filedata, islink=islink,
                          isexec=isexec, copied=copysource,
                          memctx=memctx)

    return getfilectx
2073 1990
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents become the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # normalize 'filectxfn' to a callable taking (repo, memctx, path)
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # unknown to both parents: newly added
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content present
                modified.append(f)
            else:
                # filectxfn returned None: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2196 2113
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # build the flag string: 'l' for symlink, 'x' for executable
        flags = ''
        if islink:
            flags = flags + 'l'
        if isexec:
            flags = flags + 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2229 2146
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # not overridden -> trivially matches the original
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazily evaluated: 'datafunc' is only called when data is needed
        return self._datafunc()
2300 2217
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # missing parents become the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # compare the parents' *nodes* against nullid: p1/p2 are changectx
        # objects, which never compare equal to a raw node id, so testing
        # "p1 != nullid" would make the null-parent guard a no-op and the
        # check could raise spuriously for a null parent
        if p1.node() != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2.node() != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the manifest reused from the original ctx."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data is unchanged; delegate to the original context
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # unknown to both parents: newly added
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,339 +1,424
1 1 # dagop.py - graph ancestry and topology algorithm for revset
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11
12 12 from . import (
13 13 error,
14 mdiff,
14 15 node,
16 patch,
15 17 smartset,
16 18 )
17 19
18 20 baseset = smartset.baseset
19 21 generatorset = smartset.generatorset
20 22
def revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # with followfirst, only the first parent of each revision is walked
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        # 'h' is a max-heap of revisions still to emit; heapq is a
        # min-heap, so revisions are stored negated
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # lazily merge further input revs into the heap so they are
            # interleaved with discovered ancestors in descending order
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                try:
                    for parent in cl.parentrevs(current)[:cut]:
                        if parent != node.nullrev:
                            heapq.heappush(h, -parent)
                except error.WdirUnsupported:
                    # the working directory has no changelog entry; fetch
                    # its parents from the context instead
                    for parent in repo[current].parents()[:cut]:
                        if parent.rev() != node.nullrev:
                            heapq.heappush(h, -parent.rev())

    return generatorset(iterate(), iterasc=False)
58 60
def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # with followfirst, only first-parent links count as descent
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # a revision is a descendant if one of its (considered)
            # parents is in the initial set or was already emitted
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
87 89
88 90 def _reachablerootspure(repo, minroot, roots, heads, includepath):
89 91 """return (heads(::<roots> and ::<heads>))
90 92
91 93 If includepath is True, return (<roots>::<heads>)."""
92 94 if not roots:
93 95 return []
94 96 parentrevs = repo.changelog.parentrevs
95 97 roots = set(roots)
96 98 visit = list(heads)
97 99 reachable = set()
98 100 seen = {}
99 101 # prefetch all the things! (because python is slow)
100 102 reached = reachable.add
101 103 dovisit = visit.append
102 104 nextvisit = visit.pop
103 105 # open-code the post-order traversal due to the tiny size of
104 106 # sys.getrecursionlimit()
105 107 while visit:
106 108 rev = nextvisit()
107 109 if rev in roots:
108 110 reached(rev)
109 111 if not includepath:
110 112 continue
111 113 parents = parentrevs(rev)
112 114 seen[rev] = parents
113 115 for parent in parents:
114 116 if parent >= minroot and parent not in seen:
115 117 dovisit(parent)
116 118 if not reachable:
117 119 return baseset()
118 120 if not includepath:
119 121 return reachable
120 122 for rev in sorted(seen):
121 123 for parent in seen[rev]:
122 124 if parent in reachable:
123 125 reached(rev)
124 126 return reachable
125 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the changelog's native implementation when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        # fall back to the pure-python walk
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
142 144
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # a '!' block type marks an actual change (as opposed to context)
    diffinrange = False
    for _block, blocktype in inrangeblocks:
        if blocktype == '!':
            diffinrange = True
            break
    return diffinrange, linerange1
154
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # map of (linkrev, filenode) -> (filectx, line range); keyed so that
    # max(visit) pops the highest (most recent) revision first
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead of possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
189
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # 'seen' maps a file revision to (filectx, line range of the tracked
    # block in that revision)
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
227
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    ``revs`` is a list of revision numbers; it is sorted in place, highest
    first. ``parentsfunc`` maps a revision to its parent revisions.
    ``firstbranch`` optionally pre-seeds the set of "unblocked" revisions,
    forcing the branch containing them to be emitted first.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
@@ -1,1383 +1,1383
1 1 #
2 2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import cgi
11 11 import copy
12 12 import mimetypes
13 13 import os
14 14 import re
15 15
16 16 from ..i18n import _
17 17 from ..node import hex, short
18 18
19 19 from .common import (
20 20 ErrorResponse,
21 21 HTTP_FORBIDDEN,
22 22 HTTP_NOT_FOUND,
23 23 HTTP_OK,
24 24 get_contact,
25 25 paritygen,
26 26 staticfile,
27 27 )
28 28
29 29 from .. import (
30 30 archival,
31 context,
31 dagop,
32 32 encoding,
33 33 error,
34 34 graphmod,
35 35 revset,
36 36 revsetlang,
37 37 scmutil,
38 38 smartset,
39 39 templatefilters,
40 40 templater,
41 41 util,
42 42 )
43 43
44 44 from . import (
45 45 webutil,
46 46 )
47 47
48 48 __all__ = []
49 49 commands = {}
50 50
class webcommand(object):
    """Decorator used to register a web command handler.

    The decorator takes as its positional arguments the name/path the
    command should be accessible under.

    Usage:

    @webcommand('mycommand')
    def mycommand(web, req, tmpl):
        pass
    """

    def __init__(self, name):
        # URL path segment the handler is served under.
        self.name = name

    def __call__(self, handler):
        # Register in the module-level dispatch table and export list,
        # then hand the function back unchanged.
        commands[self.name] = handler
        __all__.append(self.name)
        return handler
71 71
@webcommand('log')
def log(web, req, tmpl):
    """
    /log[/{revision}[/{path}]]
    --------------------------

    Show repository or file history.

    For URLs of the form ``/log/{revision}``, a list of changesets starting at
    the specified changeset identifier is shown. If ``{revision}`` is not
    defined, the default is ``tip``. This form is equivalent to the
    ``changelog`` handler.

    For URLs of the form ``/log/{revision}/{file}``, the history for a specific
    file will be shown. This form is equivalent to the ``filelog`` handler.
    """
    # A non-empty 'file' argument selects per-file history; anything else
    # falls through to the repository-wide changelog.
    if 'file' in req.form and req.form['file'][0]:
        return filelog(web, req, tmpl)
    return changelog(web, req, tmpl)
93 93
@webcommand('rawfile')
def rawfile(web, req, tmpl):
    # Deliver the raw bytes of a file. When no path is given, or the path
    # does not resolve to a file, fall back to the manifest view.
    guessmime = web.configbool('web', 'guessmime', False)

    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        content = manifest(web, req, tmpl)
        req.respond(HTTP_OK, web.ctype)
        return content

    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError as inst:
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            # The manifest fallback failed too: surface the original
            # file lookup error instead.
            raise inst

    path = fctx.path()
    text = fctx.data()

    # Default to an opaque binary type; optionally sniff a better one.
    mt = 'application/binary'
    if guessmime:
        guessed = mimetypes.guess_type(path)[0]
        if guessed is not None:
            mt = guessed
        elif util.binary(text):
            mt = 'application/binary'
        else:
            mt = 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, body=text)
    return []
129 129
def _filerevision(web, req, tmpl, fctx):
    # Render one revision of a file through the 'filerevision' template.
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)
    ishead = fctx.filerev() in fctx.filelog().headrevs()

    if util.binary(text):
        # Binary payloads are replaced by a short placeholder string.
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        # Lazily produce one mapping per line for the template engine;
        # line numbers are 1-based.
        for lineno, t in enumerate(text.splitlines(True), 1):
            yield {"line": t,
                   "lineid": "l%d" % lineno,
                   "linenumber": "% 6d" % lineno,
                   "parity": next(parity)}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=lines(),
                symrev=webutil.symrevorshortnode(req, fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f),
                ishead=int(ishead),
                **webutil.commonentry(web.repo, fctx))
156 156
@webcommand('file')
def file(web, req, tmpl):
    """
    /file/{revision}[/{path}]
    -------------------------

    Show information about a directory or file in the repository.

    Info about the ``path`` given as a URL parameter will be rendered.

    If ``path`` is a directory, information about the entries in that
    directory will be rendered. This form is equivalent to the ``manifest``
    handler.

    If ``path`` is a file, information about that file will be shown via
    the ``filerevision`` template.

    If ``path`` is not defined, information about the root directory will
    be rendered.
    """
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    # An empty path means the repository root: render the manifest.
    if not path:
        return manifest(web, req, tmpl)
    try:
        return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req))
    except error.LookupError as inst:
        # The path is not a file: retry as a directory listing, and
        # re-raise the original lookup error if that fails too.
        try:
            return manifest(web, req, tmpl)
        except ErrorResponse:
            raise inst
187 187
def _search(web, req, tmpl):
    """Render the search results page for the 'rev' query string argument.

    The query is interpreted, in order of preference, as an exact revision
    identifier, a revset expression, or literal keywords; the matching
    changesets are rendered through the 'search' template.
    """
    MODE_REVISION = 'rev'
    MODE_KEYWORD = 'keyword'
    MODE_REVSET = 'revset'

    def revsearch(ctx):
        # Exact-revision mode: a single changeset.
        yield ctx

    def keywordsearch(query):
        lower = encoding.lower
        qw = lower(query).split()

        def revgen():
            # Walk the changelog from tip to root, 100 revisions at a time,
            # yielding newest first within each window.
            cl = web.repo.changelog
            for i in xrange(len(web.repo) - 1, 0, -100):
                l = []
                for j in cl.revs(max(0, i - 99), i):
                    ctx = web.repo[j]
                    l.append(ctx)
                l.reverse()
                for e in l:
                    yield e

        for ctx in revgen():
            miss = 0
            # Every keyword must match user, description or file list.
            for q in qw:
                if not (q in lower(ctx.user()) or
                        q in lower(ctx.description()) or
                        q in lower(" ".join(ctx.files()))):
                    miss = 1
                    break
            if miss:
                continue

            yield ctx

    def revsetsearch(revs):
        for r in revs:
            yield web.repo[r]

    searchfuncs = {
        MODE_REVISION: (revsearch, 'exact revision search'),
        MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
        MODE_REVSET: (revsetsearch, 'revset expression search'),
    }

    def getsearchmode(query):
        """Classify 'query' and return (mode, argument for the search func)."""
        try:
            ctx = web.repo[query]
        except (error.RepoError, error.LookupError):
            # query is not an exact revision pointer, need to
            # decide if it's a revset expression or keywords
            pass
        else:
            return MODE_REVISION, ctx

        revdef = 'reverse(%s)' % query
        try:
            tree = revsetlang.parse(revdef)
        except error.ParseError:
            # can't parse to a revset tree
            return MODE_KEYWORD, query

        if revsetlang.depth(tree) <= 2:
            # no revset syntax used
            return MODE_KEYWORD, query

        # Regex patterns in the query are treated as keywords, not revsets.
        if any((token, (value or '')[:3]) == ('string', 're:')
               for token, value, pos in revsetlang.tokenize(revdef)):
            return MODE_KEYWORD, query

        # Only whitelisted (safe) revset functions may be evaluated.
        funcsused = revsetlang.funcsused(tree)
        if not funcsused.issubset(revset.safesymbols):
            return MODE_KEYWORD, query

        mfunc = revset.match(web.repo.ui, revdef)
        try:
            revs = mfunc(web.repo)
            return MODE_REVSET, revs
        # ParseError: wrongly placed tokens, wrongs arguments, etc
        # RepoLookupError: no such revision, e.g. in 'revision:'
        # Abort: bookmark/tag not exists
        # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
        except (error.ParseError, error.RepoLookupError, error.Abort,
                LookupError):
            return MODE_KEYWORD, query

    def changelist(**map):
        # NOTE: closes over 'searchfunc', 'funcarg', 'parity' and 'revcount',
        # all assigned below before the template consumes this generator.
        count = 0

        for ctx in searchfunc[0](funcarg):
            count += 1
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            yield tmpl('searchentry',
                       parity=next(parity),
                       changelogtag=showtags,
                       files=files,
                       **webutil.commonentry(web.repo, ctx))

            if count >= revcount:
                break

    query = req.form['rev'][0]
    revcount = web.maxchanges
    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # Ignore a malformed 'revcount' and keep the default.
            pass

    # 'less'/'more' navigation links halve/double the shown revision count.
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    lessvars['rev'] = query
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2
    morevars['rev'] = query

    mode, funcarg = getsearchmode(query)

    if 'forcekw' in req.form:
        # The user explicitly requested keyword search regardless of mode.
        showforcekw = ''
        showunforcekw = searchfuncs[mode][1]
        mode = MODE_KEYWORD
        funcarg = query
    else:
        if mode != MODE_KEYWORD:
            showforcekw = searchfuncs[MODE_KEYWORD][1]
        else:
            showforcekw = ''
        showunforcekw = ''

    searchfunc = searchfuncs[mode]

    tip = web.repo['tip']
    parity = paritygen(web.stripecount)

    return tmpl('search', query=query, node=tip.hex(), symrev='tip',
                entries=changelist, archives=web.archivelist("tip"),
                morevars=morevars, lessvars=lessvars,
                modedesc=searchfunc[1],
                showforcekw=showforcekw, showunforcekw=showunforcekw)
334 334
@webcommand('changelog')
def changelog(web, req, tmpl, shortlog=False):
    """
    /changelog[/{revision}]
    -----------------------

    Show information about multiple changesets.

    If the optional ``revision`` URL argument is absent, information about
    all changesets starting at ``tip`` will be rendered. If the ``revision``
    argument is present, changesets will be shown starting from the specified
    revision.

    If ``revision`` is absent, the ``rev`` query string argument may be
    defined. This will perform a search for changesets.

    The argument for ``rev`` can be a single revision, a revision set,
    or a literal keyword to search for in changeset data (equivalent to
    :hg:`log -k`).

    The ``revcount`` query string argument defines the maximum numbers of
    changesets to render.

    For non-searches, the ``changelog`` template will be rendered.
    """

    query = ''
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    elif 'rev' in req.form:
        # A 'rev' argument without 'node' means a search request.
        return _search(web, req, tmpl)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'

    def changelist():
        # NOTE: closes over 'pos', 'revcount' and 'parity', all assigned
        # below before list(changelist()) is evaluated.
        revs = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, 0)
        curcount = 0
        for rev in revs:
            curcount += 1
            # Fetch one extra entry so we can tell whether a 'next' page
            # navigation entry is needed.
            if curcount > revcount + 1:
                break

            entry = webutil.changelistentry(web, web.repo[rev], tmpl)
            entry['parity'] = next(parity)
            yield entry

    if shortlog:
        revcount = web.maxshortchanges
    else:
        revcount = web.maxchanges

    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # Ignore a malformed 'revcount' and keep the default.
            pass

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = ctx.rev()
    parity = paritygen(web.stripecount)

    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    entries = list(changelist())
    latestentry = entries[:1]
    if len(entries) > revcount:
        # The extra entry fetched above becomes the 'next page' anchor.
        nextentry = entries[-1:]
        entries = entries[:-1]
    else:
        nextentry = []

    return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
                node=ctx.hex(), rev=pos, symrev=symrev, changesets=count,
                entries=entries,
                latestentry=latestentry, nextentry=nextentry,
                archives=web.archivelist("tip"), revcount=revcount,
                morevars=morevars, lessvars=lessvars, query=query)
423 423
@webcommand('shortlog')
def shortlog(web, req, tmpl):
    """
    /shortlog
    ---------

    Show basic information about a set of changesets.

    This accepts the same parameters as the ``changelog`` handler. The only
    difference is the ``shortlog`` template will be rendered instead of the
    ``changelog`` template.
    """
    # Identical code path to 'changelog'; only the template differs.
    return changelog(web, req, tmpl, True)
437 437
@webcommand('changeset')
def changeset(web, req, tmpl):
    """
    /changeset[/{revision}]
    -----------------------

    Show information about a single changeset.

    A URL path argument is the changeset identifier to show. See ``hg help
    revisions`` for possible values. If not defined, the ``tip`` changeset
    will be shown.

    The ``changeset`` template is rendered. Contents of the ``changesettag``,
    ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
    templates related to diffs may all be used to produce the output.
    """
    # Resolve the requested changeset, then hand all of its template
    # variables over in one go.
    ctx = webutil.changectx(web.repo, req)
    entry = webutil.changesetentry(web, req, tmpl, ctx)
    return tmpl('changeset', **entry)
457 457
458 458 rev = webcommand('rev')(changeset)
459 459
def decodepath(path):
    """Hook for mapping a path in the repository to a path in the
    working copy.

    Extensions (e.g., largefiles) can override this to remap files in
    the virtual file system presented by the manifest command below."""
    # Default implementation: the identity mapping.
    return path
467 467
@webcommand('manifest')
def manifest(web, req, tmpl):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    # 'files': direct children that are files; 'dirs': nested dict tree of
    # child directories (a None key marks that files exist at that level).
    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            files[remain] = full
        else:
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        # One template mapping per file, in sorted order.
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": next(parity),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        # One template mapping per directory; chains of single-child
        # directories are collapsed into one entry ('emptydirs').
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k, v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": next(parity),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                symrev=symrev,
                path=abspath,
                up=webutil.up(abspath),
                upparity=next(parity),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                **webutil.commonentry(web.repo, ctx))
566 566
@webcommand('tags')
def tags(web, req, tmpl):
    """
    /tags
    -----

    Show information about tags.

    No arguments are accepted.

    The ``tags`` template is rendered.
    """
    # Full tag list, newest first; 'tip' can be filtered out on demand.
    alltags = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(notip, latestonly, **map):
        tagitems = alltags
        if notip:
            tagitems = [(name, n) for name, n in alltags if name != "tip"]
        if latestonly:
            tagitems = tagitems[:1]
        for name, n in tagitems:
            yield {"parity": next(parity),
                   "tag": name,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False, False, **x),
                entriesnotip=lambda **x: entries(True, False, **x),
                latestentry=lambda **x: entries(True, True, **x))
599 599
@webcommand('bookmarks')
def bookmarks(web, req, tmpl):
    """
    /bookmarks
    ----------

    Show information about bookmarks.

    No arguments are accepted.

    The ``bookmarks`` template is rendered.
    """
    # Keep only bookmarks whose changeset still exists, sorted newest
    # revision first (name breaks ties).
    marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
    sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
    marks = sorted(marks, key=sortkey, reverse=True)
    parity = paritygen(web.stripecount)

    def entries(latestonly, **map):
        shown = marks[:1] if latestonly else marks
        for name, n in shown:
            yield {"parity": next(parity),
                   "bookmark": name,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    if marks:
        latestrev = marks[0][1]
    else:
        latestrev = -1

    return tmpl("bookmarks",
                node=hex(web.repo.changelog.tip()),
                lastchange=[{"date": web.repo[latestrev].date()}],
                entries=lambda **x: entries(latestonly=False, **x),
                latestentry=lambda **x: entries(latestonly=True, **x))
637 637
@webcommand('branches')
def branches(web, req, tmpl):
    """
    /branches
    ---------

    Show information about branches.

    All known branches are contained in the output, even closed branches.

    No arguments are accepted.

    The ``branches`` template is rendered.
    """
    # Two independent generators: the full list, and a one-entry variant
    # for the 'latest' slot.
    return tmpl('branches',
                node=hex(web.repo.changelog.tip()),
                entries=webutil.branchentries(web.repo, web.stripecount),
                latestentry=webutil.branchentries(web.repo, web.stripecount,
                                                  1))
656 656
@webcommand('summary')
def summary(web, req, tmpl):
    """
    /summary
    --------

    Show a summary of repository state.

    Information about the latest changesets, bookmarks, tags, and branches
    is captured by this handler.

    The ``summary`` template is rendered.
    """
    i = reversed(web.repo.tagslist())

    def tagentries(**map):
        # At most 10 most recent tags, skipping the implicit 'tip' tag.
        parity = paritygen(web.stripecount)
        count = 0
        for k, n in i:
            if k == "tip": # skip tip
                continue

            count += 1
            if count > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=next(parity),
                       tag=k,
                       node=hex(n),
                       date=web.repo[n].date())

    def bookmarks(**map):
        # At most 10 bookmarks, newest changeset first; bookmarks pointing
        # at missing changesets are skipped.
        parity = paritygen(web.stripecount)
        marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
        sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
        marks = sorted(marks, key=sortkey, reverse=True)
        for k, n in marks[:10]: # limit to 10 bookmarks
            yield {'parity': next(parity),
                   'bookmark': k,
                   'date': web.repo[n].date(),
                   'node': hex(n)}

    def changelist(**map):
        # NOTE: closes over 'start' and 'end', assigned below before the
        # template consumes this generator.
        parity = paritygen(web.stripecount, offset=start - end)
        l = [] # build a list in forward order for efficiency
        revs = []
        if start < end:
            revs = web.repo.changelog.revs(start, end - 1)
        for i in revs:
            ctx = web.repo[i]

            l.append(tmpl(
                'shortlogentry',
                parity=next(parity),
                **webutil.commonentry(web.repo, ctx)))

        for entry in reversed(l):
            yield entry

    tip = web.repo['tip']
    count = len(web.repo)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=tip.date(),
                tags=tagentries,
                bookmarks=bookmarks,
                branches=webutil.branchentries(web.repo, web.stripecount, 10),
                shortlog=changelist,
                node=tip.hex(),
                symrev='tip',
                archives=web.archivelist("tip"),
                labels=web.configlist('web', 'labels'))
734 734
@webcommand('filediff')
def filediff(web, req, tmpl):
    """
    /diff/{revision}/{path}
    -----------------------

    Show how a file changed in a particular commit.

    The ``filediff`` template is rendered.

    This handler is registered under both the ``/diff`` and ``/filediff``
    paths. ``/diff`` is used in modern code.
    """
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, req)
    except LookupError:
        # The file does not exist in the requested revision; only proceed
        # if the changeset itself touched it (e.g. a removal).
        ctx = webutil.changectx(web.repo, req)
        path = webutil.cleanpath(web.repo, req.form['file'][0])
        if path not in ctx.files():
            raise

    if fctx is not None:
        path = fctx.path()
        ctx = fctx.changectx()
    basectx = ctx.p1()

    style = web.config('web', 'style', 'paper')
    if 'style' in req.form:
        style = req.form['style'][0]

    diffs = webutil.diffs(web, tmpl, ctx, basectx, [path], style)
    if fctx is not None:
        # Render file-level metadata (rename link, symrev) from the filectx.
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        # Removed the original's no-op 'ctx = ctx' self-assignment here;
        # 'ctx' already holds the changectx in this branch.
        rename = []
    return tmpl("filediff",
                file=path,
                symrev=webutil.symrevorshortnode(req, ctx),
                rename=rename,
                diff=diffs,
                **webutil.commonentry(web.repo, ctx))
779 779
780 780 diff = webcommand('diff')(filediff)
781 781
@webcommand('comparison')
def comparison(web, req, tmpl):
    """
    /comparison/{revision}/{path}
    -----------------------------

    Show a comparison between the old and new versions of a file from changes
    made on a particular revision.

    This is similar to the ``diff`` handler. However, this form features
    a split or side-by-side diff rather than a unified diff.

    The ``context`` query string argument can be used to control the lines of
    context in the diff.

    The ``filecomparison`` template is rendered.
    """
    ctx = webutil.changectx(web.repo, req)
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, req.form['file'][0])

    def parsecontext(v):
        # 'full' means unlimited context; -1 is the sentinel understood
        # by webutil.compare(). (Replaces the fragile 'and/or' lambda.)
        return -1 if v == 'full' else int(v)

    if 'context' in req.form:
        context = parsecontext(req.form['context'][0])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        # Binary files render as a one-line placeholder instead of content.
        if f.isbinary():
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    fctx = None
    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            # File was added: nothing on the left-hand side.
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        # File was removed: nothing on the right-hand side.
        rightlines = ()
        pfctx = ctx.parents()[0][path]
        leftlines = filelines(pfctx)

    comparison = webutil.compare(tmpl, context, leftlines, rightlines)
    if fctx is not None:
        # Render file-level metadata (rename link, symrev) from the filectx.
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        # Removed the original's no-op 'ctx = ctx' self-assignment here.
        rename = []
    return tmpl('filecomparison',
                file=path,
                symrev=webutil.symrevorshortnode(req, ctx),
                rename=rename,
                leftrev=leftrev,
                leftnode=hex(leftnode),
                rightrev=rightrev,
                rightnode=hex(rightnode),
                comparison=comparison,
                **webutil.commonentry(web.repo, ctx))
854 854
855 855 @webcommand('annotate')
856 856 def annotate(web, req, tmpl):
857 857 """
858 858 /annotate/{revision}/{path}
859 859 ---------------------------
860 860
861 861 Show changeset information for each line in a file.
862 862
863 863 The ``fileannotate`` template is rendered.
864 864 """
865 865 fctx = webutil.filectx(web.repo, req)
866 866 f = fctx.path()
867 867 parity = paritygen(web.stripecount)
868 868
869 869 # parents() is called once per line and several lines likely belong to
870 870 # same revision. So it is worth caching.
871 871 # TODO there are still redundant operations within basefilectx.parents()
872 872 # and from the fctx.annotate() call itself that could be cached.
873 873 parentscache = {}
874 874 def parents(f):
875 875 rev = f.rev()
876 876 if rev not in parentscache:
877 877 parentscache[rev] = []
878 878 for p in f.parents():
879 879 entry = {
880 880 'node': p.hex(),
881 881 'rev': p.rev(),
882 882 }
883 883 parentscache[rev].append(entry)
884 884
885 885 for p in parentscache[rev]:
886 886 yield p
887 887
888 888 def annotate(**map):
889 889 if fctx.isbinary():
890 890 mt = (mimetypes.guess_type(fctx.path())[0]
891 891 or 'application/octet-stream')
892 892 lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)]
893 893 else:
894 894 lines = webutil.annotate(fctx, web.repo.ui)
895 895
896 896 previousrev = None
897 897 blockparitygen = paritygen(1)
898 898 for lineno, ((f, targetline), l) in enumerate(lines):
899 899 rev = f.rev()
900 900 if rev != previousrev:
901 901 blockhead = True
902 902 blockparity = next(blockparitygen)
903 903 else:
904 904 blockhead = None
905 905 previousrev = rev
906 906 yield {"parity": next(parity),
907 907 "node": f.hex(),
908 908 "rev": rev,
909 909 "author": f.user(),
910 910 "parents": parents(f),
911 911 "desc": f.description(),
912 912 "extra": f.extra(),
913 913 "file": f.path(),
914 914 "blockhead": blockhead,
915 915 "blockparity": blockparity,
916 916 "targetline": targetline,
917 917 "line": l,
918 918 "lineno": lineno + 1,
919 919 "lineid": "l%d" % (lineno + 1),
920 920 "linenumber": "% 6d" % (lineno + 1),
921 921 "revdate": f.date()}
922 922
923 923 return tmpl("fileannotate",
924 924 file=f,
925 925 annotate=annotate,
926 926 path=webutil.up(f),
927 927 symrev=webutil.symrevorshortnode(req, fctx),
928 928 rename=webutil.renamelink(fctx),
929 929 permissions=fctx.manifest().flags(f),
930 930 **webutil.commonentry(web.repo, fctx))
931 931
932 932 @webcommand('filelog')
933 933 def filelog(web, req, tmpl):
934 934 """
935 935 /filelog/{revision}/{path}
936 936 --------------------------
937 937
938 938 Show information about the history of a file in the repository.
939 939
940 940 The ``revcount`` query string argument can be defined to control the
941 941 maximum number of entries to show.
942 942
943 943 The ``filelog`` template will be rendered.
944 944 """
945 945
946 946 try:
947 947 fctx = webutil.filectx(web.repo, req)
948 948 f = fctx.path()
949 949 fl = fctx.filelog()
950 950 except error.LookupError:
951 951 f = webutil.cleanpath(web.repo, req.form['file'][0])
952 952 fl = web.repo.file(f)
953 953 numrevs = len(fl)
954 954 if not numrevs: # file doesn't exist at all
955 955 raise
956 956 rev = webutil.changectx(web.repo, req).rev()
957 957 first = fl.linkrev(0)
958 958 if rev < first: # current rev is from before file existed
959 959 raise
960 960 frev = numrevs - 1
961 961 while fl.linkrev(frev) > rev:
962 962 frev -= 1
963 963 fctx = web.repo.filectx(f, fl.linkrev(frev))
964 964
965 965 revcount = web.maxshortchanges
966 966 if 'revcount' in req.form:
967 967 try:
968 968 revcount = int(req.form.get('revcount', [revcount])[0])
969 969 revcount = max(revcount, 1)
970 970 tmpl.defaults['sessionvars']['revcount'] = revcount
971 971 except ValueError:
972 972 pass
973 973
974 974 lrange = webutil.linerange(req)
975 975
976 976 lessvars = copy.copy(tmpl.defaults['sessionvars'])
977 977 lessvars['revcount'] = max(revcount / 2, 1)
978 978 morevars = copy.copy(tmpl.defaults['sessionvars'])
979 979 morevars['revcount'] = revcount * 2
980 980
981 981 patch = 'patch' in req.form
982 982 if patch:
983 983 lessvars['patch'] = morevars['patch'] = req.form['patch'][0]
984 984 descend = 'descend' in req.form
985 985 if descend:
986 986 lessvars['descend'] = morevars['descend'] = req.form['descend'][0]
987 987
988 988 count = fctx.filerev() + 1
989 989 start = max(0, count - revcount) # first rev on this page
990 990 end = min(count, start + revcount) # last rev on this page
991 991 parity = paritygen(web.stripecount, offset=start - end)
992 992
993 993 repo = web.repo
994 994 revs = fctx.filelog().revs(start, end - 1)
995 995 entries = []
996 996
997 997 diffstyle = web.config('web', 'style', 'paper')
998 998 if 'style' in req.form:
999 999 diffstyle = req.form['style'][0]
1000 1000
1001 1001 def diff(fctx, linerange=None):
1002 1002 ctx = fctx.changectx()
1003 1003 basectx = ctx.p1()
1004 1004 path = fctx.path()
1005 1005 return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle,
1006 1006 linerange=linerange,
1007 1007 lineidprefix='%s-' % ctx.hex()[:12])
1008 1008
1009 1009 linerange = None
1010 1010 if lrange is not None:
1011 1011 linerange = webutil.formatlinerange(*lrange)
1012 1012 # deactivate numeric nav links when linerange is specified as this
1013 1013 # would required a dedicated "revnav" class
1014 1014 nav = None
1015 1015 if descend:
1016 it = context.blockdescendants(fctx, *lrange)
1016 it = dagop.blockdescendants(fctx, *lrange)
1017 1017 else:
1018 it = context.blockancestors(fctx, *lrange)
1018 it = dagop.blockancestors(fctx, *lrange)
1019 1019 for i, (c, lr) in enumerate(it, 1):
1020 1020 diffs = None
1021 1021 if patch:
1022 1022 diffs = diff(c, linerange=lr)
1023 1023 # follow renames accross filtered (not in range) revisions
1024 1024 path = c.path()
1025 1025 entries.append(dict(
1026 1026 parity=next(parity),
1027 1027 filerev=c.rev(),
1028 1028 file=path,
1029 1029 diff=diffs,
1030 1030 linerange=webutil.formatlinerange(*lr),
1031 1031 **webutil.commonentry(repo, c)))
1032 1032 if i == revcount:
1033 1033 break
1034 1034 lessvars['linerange'] = webutil.formatlinerange(*lrange)
1035 1035 morevars['linerange'] = lessvars['linerange']
1036 1036 else:
1037 1037 for i in revs:
1038 1038 iterfctx = fctx.filectx(i)
1039 1039 diffs = None
1040 1040 if patch:
1041 1041 diffs = diff(iterfctx)
1042 1042 entries.append(dict(
1043 1043 parity=next(parity),
1044 1044 filerev=i,
1045 1045 file=f,
1046 1046 diff=diffs,
1047 1047 rename=webutil.renamelink(iterfctx),
1048 1048 **webutil.commonentry(repo, iterfctx)))
1049 1049 entries.reverse()
1050 1050 revnav = webutil.filerevnav(web.repo, fctx.path())
1051 1051 nav = revnav.gen(end - 1, revcount, count)
1052 1052
1053 1053 latestentry = entries[:1]
1054 1054
1055 1055 return tmpl("filelog",
1056 1056 file=f,
1057 1057 nav=nav,
1058 1058 symrev=webutil.symrevorshortnode(req, fctx),
1059 1059 entries=entries,
1060 1060 descend=descend,
1061 1061 patch=patch,
1062 1062 latestentry=latestentry,
1063 1063 linerange=linerange,
1064 1064 revcount=revcount,
1065 1065 morevars=morevars,
1066 1066 lessvars=lessvars,
1067 1067 **webutil.commonentry(web.repo, fctx))
1068 1068
1069 1069 @webcommand('archive')
1070 1070 def archive(web, req, tmpl):
1071 1071 """
1072 1072 /archive/{revision}.{format}[/{path}]
1073 1073 -------------------------------------
1074 1074
1075 1075 Obtain an archive of repository content.
1076 1076
1077 1077 The content and type of the archive is defined by a URL path parameter.
1078 1078 ``format`` is the file extension of the archive type to be generated. e.g.
1079 1079 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1080 1080 server configuration.
1081 1081
1082 1082 The optional ``path`` URL parameter controls content to include in the
1083 1083 archive. If omitted, every file in the specified revision is present in the
1084 1084 archive. If included, only the specified file or contents of the specified
1085 1085 directory will be included in the archive.
1086 1086
1087 1087 No template is used for this handler. Raw, binary content is generated.
1088 1088 """
1089 1089
1090 1090 type_ = req.form.get('type', [None])[0]
1091 1091 allowed = web.configlist("web", "allow_archive")
1092 1092 key = req.form['node'][0]
1093 1093
1094 1094 if type_ not in web.archivespecs:
1095 1095 msg = 'Unsupported archive type: %s' % type_
1096 1096 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1097 1097
1098 1098 if not ((type_ in allowed or
1099 1099 web.configbool("web", "allow" + type_, False))):
1100 1100 msg = 'Archive type not allowed: %s' % type_
1101 1101 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1102 1102
1103 1103 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
1104 1104 cnode = web.repo.lookup(key)
1105 1105 arch_version = key
1106 1106 if cnode == key or key == 'tip':
1107 1107 arch_version = short(cnode)
1108 1108 name = "%s-%s" % (reponame, arch_version)
1109 1109
1110 1110 ctx = webutil.changectx(web.repo, req)
1111 1111 pats = []
1112 1112 matchfn = scmutil.match(ctx, [])
1113 1113 file = req.form.get('file', None)
1114 1114 if file:
1115 1115 pats = ['path:' + file[0]]
1116 1116 matchfn = scmutil.match(ctx, pats, default='path')
1117 1117 if pats:
1118 1118 files = [f for f in ctx.manifest().keys() if matchfn(f)]
1119 1119 if not files:
1120 1120 raise ErrorResponse(HTTP_NOT_FOUND,
1121 1121 'file(s) not found: %s' % file[0])
1122 1122
1123 1123 mimetype, artype, extension, encoding = web.archivespecs[type_]
1124 1124 headers = [
1125 1125 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
1126 1126 ]
1127 1127 if encoding:
1128 1128 headers.append(('Content-Encoding', encoding))
1129 1129 req.headers.extend(headers)
1130 1130 req.respond(HTTP_OK, mimetype)
1131 1131
1132 1132 archival.archive(web.repo, req, cnode, artype, prefix=name,
1133 1133 matchfn=matchfn,
1134 1134 subrepos=web.configbool("web", "archivesubrepos"))
1135 1135 return []
1136 1136
1137 1137
1138 1138 @webcommand('static')
1139 1139 def static(web, req, tmpl):
1140 1140 fname = req.form['file'][0]
1141 1141 # a repo owner may set web.static in .hg/hgrc to get any file
1142 1142 # readable by the user running the CGI script
1143 1143 static = web.config("web", "static", None, untrusted=False)
1144 1144 if not static:
1145 1145 tp = web.templatepath or templater.templatepaths()
1146 1146 if isinstance(tp, str):
1147 1147 tp = [tp]
1148 1148 static = [os.path.join(p, 'static') for p in tp]
1149 1149 staticfile(static, fname, req)
1150 1150 return []
1151 1151
1152 1152 @webcommand('graph')
1153 1153 def graph(web, req, tmpl):
1154 1154 """
1155 1155 /graph[/{revision}]
1156 1156 -------------------
1157 1157
1158 1158 Show information about the graphical topology of the repository.
1159 1159
1160 1160 Information rendered by this handler can be used to create visual
1161 1161 representations of repository topology.
1162 1162
1163 1163 The ``revision`` URL parameter controls the starting changeset.
1164 1164
1165 1165 The ``revcount`` query string argument can define the number of changesets
1166 1166 to show information for.
1167 1167
1168 1168 This handler will render the ``graph`` template.
1169 1169 """
1170 1170
1171 1171 if 'node' in req.form:
1172 1172 ctx = webutil.changectx(web.repo, req)
1173 1173 symrev = webutil.symrevorshortnode(req, ctx)
1174 1174 else:
1175 1175 ctx = web.repo['tip']
1176 1176 symrev = 'tip'
1177 1177 rev = ctx.rev()
1178 1178
1179 1179 bg_height = 39
1180 1180 revcount = web.maxshortchanges
1181 1181 if 'revcount' in req.form:
1182 1182 try:
1183 1183 revcount = int(req.form.get('revcount', [revcount])[0])
1184 1184 revcount = max(revcount, 1)
1185 1185 tmpl.defaults['sessionvars']['revcount'] = revcount
1186 1186 except ValueError:
1187 1187 pass
1188 1188
1189 1189 lessvars = copy.copy(tmpl.defaults['sessionvars'])
1190 1190 lessvars['revcount'] = max(revcount / 2, 1)
1191 1191 morevars = copy.copy(tmpl.defaults['sessionvars'])
1192 1192 morevars['revcount'] = revcount * 2
1193 1193
1194 1194 count = len(web.repo)
1195 1195 pos = rev
1196 1196
1197 1197 uprev = min(max(0, count - 1), rev + revcount)
1198 1198 downrev = max(0, rev - revcount)
1199 1199 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1200 1200
1201 1201 tree = []
1202 1202 if pos != -1:
1203 1203 allrevs = web.repo.changelog.revs(pos, 0)
1204 1204 revs = []
1205 1205 for i in allrevs:
1206 1206 revs.append(i)
1207 1207 if len(revs) >= revcount:
1208 1208 break
1209 1209
1210 1210 # We have to feed a baseset to dagwalker as it is expecting smartset
1211 1211 # object. This does not have a big impact on hgweb performance itself
1212 1212 # since hgweb graphing code is not itself lazy yet.
1213 1213 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1214 1214 # As we said one line above... not lazy.
1215 1215 tree = list(graphmod.colored(dag, web.repo))
1216 1216
1217 1217 def getcolumns(tree):
1218 1218 cols = 0
1219 1219 for (id, type, ctx, vtx, edges) in tree:
1220 1220 if type != graphmod.CHANGESET:
1221 1221 continue
1222 1222 cols = max(cols, max([edge[0] for edge in edges] or [0]),
1223 1223 max([edge[1] for edge in edges] or [0]))
1224 1224 return cols
1225 1225
1226 1226 def graphdata(usetuples, encodestr):
1227 1227 data = []
1228 1228
1229 1229 row = 0
1230 1230 for (id, type, ctx, vtx, edges) in tree:
1231 1231 if type != graphmod.CHANGESET:
1232 1232 continue
1233 1233 node = str(ctx)
1234 1234 age = encodestr(templatefilters.age(ctx.date()))
1235 1235 desc = templatefilters.firstline(encodestr(ctx.description()))
1236 1236 desc = cgi.escape(templatefilters.nonempty(desc))
1237 1237 user = cgi.escape(templatefilters.person(encodestr(ctx.user())))
1238 1238 branch = cgi.escape(encodestr(ctx.branch()))
1239 1239 try:
1240 1240 branchnode = web.repo.branchtip(branch)
1241 1241 except error.RepoLookupError:
1242 1242 branchnode = None
1243 1243 branch = branch, branchnode == ctx.node()
1244 1244
1245 1245 if usetuples:
1246 1246 data.append((node, vtx, edges, desc, user, age, branch,
1247 1247 [cgi.escape(encodestr(x)) for x in ctx.tags()],
1248 1248 [cgi.escape(encodestr(x))
1249 1249 for x in ctx.bookmarks()]))
1250 1250 else:
1251 1251 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1252 1252 'color': (edge[2] - 1) % 6 + 1,
1253 1253 'width': edge[3], 'bcolor': edge[4]}
1254 1254 for edge in edges]
1255 1255
1256 1256 data.append(
1257 1257 {'node': node,
1258 1258 'col': vtx[0],
1259 1259 'color': (vtx[1] - 1) % 6 + 1,
1260 1260 'edges': edgedata,
1261 1261 'row': row,
1262 1262 'nextrow': row + 1,
1263 1263 'desc': desc,
1264 1264 'user': user,
1265 1265 'age': age,
1266 1266 'bookmarks': webutil.nodebookmarksdict(
1267 1267 web.repo, ctx.node()),
1268 1268 'branches': webutil.nodebranchdict(web.repo, ctx),
1269 1269 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1270 1270 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1271 1271
1272 1272 row += 1
1273 1273
1274 1274 return data
1275 1275
1276 1276 cols = getcolumns(tree)
1277 1277 rows = len(tree)
1278 1278 canvasheight = (rows + 1) * bg_height - 27
1279 1279
1280 1280 return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
1281 1281 uprev=uprev,
1282 1282 lessvars=lessvars, morevars=morevars, downrev=downrev,
1283 1283 cols=cols, rows=rows,
1284 1284 canvaswidth=(cols + 1) * bg_height,
1285 1285 truecanvasheight=rows * bg_height,
1286 1286 canvasheight=canvasheight, bg_height=bg_height,
1287 1287 # {jsdata} will be passed to |json, so it must be in utf-8
1288 1288 jsdata=lambda **x: graphdata(True, encoding.fromlocal),
1289 1289 nodes=lambda **x: graphdata(False, str),
1290 1290 node=ctx.hex(), changenav=changenav)
1291 1291
1292 1292 def _getdoc(e):
1293 1293 doc = e[0].__doc__
1294 1294 if doc:
1295 1295 doc = _(doc).partition('\n')[0]
1296 1296 else:
1297 1297 doc = _('(no help text available)')
1298 1298 return doc
1299 1299
1300 1300 @webcommand('help')
1301 1301 def help(web, req, tmpl):
1302 1302 """
1303 1303 /help[/{topic}]
1304 1304 ---------------
1305 1305
1306 1306 Render help documentation.
1307 1307
1308 1308 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1309 1309 is defined, that help topic will be rendered. If not, an index of
1310 1310 available help topics will be rendered.
1311 1311
1312 1312 The ``help`` template will be rendered when requesting help for a topic.
1313 1313 ``helptopics`` will be rendered for the index of help topics.
1314 1314 """
1315 1315 from .. import commands, help as helpmod # avoid cycle
1316 1316
1317 1317 topicname = req.form.get('node', [None])[0]
1318 1318 if not topicname:
1319 1319 def topics(**map):
1320 1320 for entries, summary, _doc in helpmod.helptable:
1321 1321 yield {'topic': entries[0], 'summary': summary}
1322 1322
1323 1323 early, other = [], []
1324 1324 primary = lambda s: s.partition('|')[0]
1325 1325 for c, e in commands.table.iteritems():
1326 1326 doc = _getdoc(e)
1327 1327 if 'DEPRECATED' in doc or c.startswith('debug'):
1328 1328 continue
1329 1329 cmd = primary(c)
1330 1330 if cmd.startswith('^'):
1331 1331 early.append((cmd[1:], doc))
1332 1332 else:
1333 1333 other.append((cmd, doc))
1334 1334
1335 1335 early.sort()
1336 1336 other.sort()
1337 1337
1338 1338 def earlycommands(**map):
1339 1339 for c, doc in early:
1340 1340 yield {'topic': c, 'summary': doc}
1341 1341
1342 1342 def othercommands(**map):
1343 1343 for c, doc in other:
1344 1344 yield {'topic': c, 'summary': doc}
1345 1345
1346 1346 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1347 1347 othercommands=othercommands, title='Index')
1348 1348
1349 1349 # Render an index of sub-topics.
1350 1350 if topicname in helpmod.subtopics:
1351 1351 topics = []
1352 1352 for entries, summary, _doc in helpmod.subtopics[topicname]:
1353 1353 topics.append({
1354 1354 'topic': '%s.%s' % (topicname, entries[0]),
1355 1355 'basename': entries[0],
1356 1356 'summary': summary,
1357 1357 })
1358 1358
1359 1359 return tmpl('helptopics', topics=topics, title=topicname,
1360 1360 subindex=True)
1361 1361
1362 1362 u = webutil.wsgiui.load()
1363 1363 u.verbose = True
1364 1364
1365 1365 # Render a page from a sub-topic.
1366 1366 if '.' in topicname:
1367 1367 # TODO implement support for rendering sections, like
1368 1368 # `hg help` works.
1369 1369 topic, subtopic = topicname.split('.', 1)
1370 1370 if topic not in helpmod.subtopics:
1371 1371 raise ErrorResponse(HTTP_NOT_FOUND)
1372 1372 else:
1373 1373 topic = topicname
1374 1374 subtopic = None
1375 1375
1376 1376 try:
1377 1377 doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
1378 1378 except error.UnknownCommand:
1379 1379 raise ErrorResponse(HTTP_NOT_FOUND)
1380 1380 return tmpl('help', topic=topicname, doc=doc)
1381 1381
1382 1382 # tell hggettext to extract docstrings from these functions:
1383 1383 i18nfunctions = commands.values()
@@ -1,2020 +1,2018
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 pathutil,
23 23 phases,
24 24 registrar,
25 25 repoview,
26 26 revsetlang,
27 27 scmutil,
28 28 smartset,
29 29 util,
30 30 )
31 31
32 32 # helpers for processing parsed tree
33 33 getsymbol = revsetlang.getsymbol
34 34 getstring = revsetlang.getstring
35 35 getinteger = revsetlang.getinteger
36 36 getboolean = revsetlang.getboolean
37 37 getlist = revsetlang.getlist
38 38 getrange = revsetlang.getrange
39 39 getargs = revsetlang.getargs
40 40 getargsdict = revsetlang.getargsdict
41 41
42 42 # constants used as an argument of match() and matchany()
43 43 anyorder = revsetlang.anyorder
44 44 defineorder = revsetlang.defineorder
45 45 followorder = revsetlang.followorder
46 46
47 47 baseset = smartset.baseset
48 48 generatorset = smartset.generatorset
49 49 spanset = smartset.spanset
50 50 fullreposet = smartset.fullreposet
51 51
52 52 # helpers
53 53
54 54 def getset(repo, subset, x):
55 55 if not x:
56 56 raise error.ParseError(_("missing argument"))
57 57 return methods[x[0]](repo, subset, *x[1:])
58 58
59 59 def _getrevsource(repo, r):
60 60 extra = repo[r].extra()
61 61 for label in ('source', 'transplant_source', 'rebase_source'):
62 62 if label in extra:
63 63 try:
64 64 return repo[extra[label]].rev()
65 65 except error.RepoLookupError:
66 66 pass
67 67 return None
68 68
69 69 # operator methods
70 70
71 71 def stringset(repo, subset, x):
72 72 x = scmutil.intrev(repo[x])
73 73 if (x in subset
74 74 or x == node.nullrev and isinstance(subset, fullreposet)):
75 75 return baseset([x])
76 76 return baseset()
77 77
78 78 def rangeset(repo, subset, x, y, order):
79 79 m = getset(repo, fullreposet(repo), x)
80 80 n = getset(repo, fullreposet(repo), y)
81 81
82 82 if not m or not n:
83 83 return baseset()
84 84 return _makerangeset(repo, subset, m.first(), n.last(), order)
85 85
86 86 def rangeall(repo, subset, x, order):
87 87 assert x is None
88 88 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
89 89
90 90 def rangepre(repo, subset, y, order):
91 91 # ':y' can't be rewritten to '0:y' since '0' may be hidden
92 92 n = getset(repo, fullreposet(repo), y)
93 93 if not n:
94 94 return baseset()
95 95 return _makerangeset(repo, subset, 0, n.last(), order)
96 96
97 97 def rangepost(repo, subset, x, order):
98 98 m = getset(repo, fullreposet(repo), x)
99 99 if not m:
100 100 return baseset()
101 101 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
102 102
103 103 def _makerangeset(repo, subset, m, n, order):
104 104 if m == n:
105 105 r = baseset([m])
106 106 elif n == node.wdirrev:
107 107 r = spanset(repo, m, len(repo)) + baseset([n])
108 108 elif m == node.wdirrev:
109 109 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
110 110 elif m < n:
111 111 r = spanset(repo, m, n + 1)
112 112 else:
113 113 r = spanset(repo, m, n - 1)
114 114
115 115 if order == defineorder:
116 116 return r & subset
117 117 else:
118 118 # carrying the sorting over when possible would be more efficient
119 119 return subset & r
120 120
121 121 def dagrange(repo, subset, x, y, order):
122 122 r = fullreposet(repo)
123 123 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
124 124 includepath=True)
125 125 return subset & xs
126 126
127 127 def andset(repo, subset, x, y, order):
128 128 return getset(repo, getset(repo, subset, x), y)
129 129
130 130 def differenceset(repo, subset, x, y, order):
131 131 return getset(repo, subset, x) - getset(repo, subset, y)
132 132
133 133 def _orsetlist(repo, subset, xs):
134 134 assert xs
135 135 if len(xs) == 1:
136 136 return getset(repo, subset, xs[0])
137 137 p = len(xs) // 2
138 138 a = _orsetlist(repo, subset, xs[:p])
139 139 b = _orsetlist(repo, subset, xs[p:])
140 140 return a + b
141 141
142 142 def orset(repo, subset, x, order):
143 143 xs = getlist(x)
144 144 if order == followorder:
145 145 # slow path to take the subset order
146 146 return subset & _orsetlist(repo, fullreposet(repo), xs)
147 147 else:
148 148 return _orsetlist(repo, subset, xs)
149 149
150 150 def notset(repo, subset, x, order):
151 151 return subset - getset(repo, subset, x)
152 152
153 153 def listset(repo, subset, *xs):
154 154 raise error.ParseError(_("can't use a list in this context"),
155 155 hint=_('see hg help "revsets.x or y"'))
156 156
157 157 def keyvaluepair(repo, subset, k, v):
158 158 raise error.ParseError(_("can't use a key-value pair in this context"))
159 159
160 160 def func(repo, subset, a, b, order):
161 161 f = getsymbol(a)
162 162 if f in symbols:
163 163 func = symbols[f]
164 164 if getattr(func, '_takeorder', False):
165 165 return func(repo, subset, b, order)
166 166 return func(repo, subset, b)
167 167
168 168 keep = lambda fn: getattr(fn, '__doc__', None) is not None
169 169
170 170 syms = [s for (s, fn) in symbols.items() if keep(fn)]
171 171 raise error.UnknownIdentifier(f, syms)
172 172
173 173 # functions
174 174
175 175 # symbols are callables like:
176 176 # fn(repo, subset, x)
177 177 # with:
178 178 # repo - current repository instance
179 179 # subset - of revisions to be examined
180 180 # x - argument in tree form
181 181 symbols = {}
182 182
183 183 # symbols which can't be used for a DoS attack for any given input
184 184 # (e.g. those which accept regexes as plain strings shouldn't be included)
185 185 # functions that just return a lot of changesets (like all) don't count here
186 186 safesymbols = set()
187 187
188 188 predicate = registrar.revsetpredicate()
189 189
190 190 @predicate('_destupdate')
191 191 def _destupdate(repo, subset, x):
192 192 # experimental revset for update destination
193 193 args = getargsdict(x, 'limit', 'clean')
194 194 return subset & baseset([destutil.destupdate(repo, **args)[0]])
195 195
196 196 @predicate('_destmerge')
197 197 def _destmerge(repo, subset, x):
198 198 # experimental revset for merge destination
199 199 sourceset = None
200 200 if x is not None:
201 201 sourceset = getset(repo, fullreposet(repo), x)
202 202 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
203 203
204 204 @predicate('adds(pattern)', safe=True)
205 205 def adds(repo, subset, x):
206 206 """Changesets that add a file matching pattern.
207 207
208 208 The pattern without explicit kind like ``glob:`` is expected to be
209 209 relative to the current directory and match against a file or a
210 210 directory.
211 211 """
212 212 # i18n: "adds" is a keyword
213 213 pat = getstring(x, _("adds requires a pattern"))
214 214 return checkstatus(repo, subset, pat, 1)
215 215
216 216 @predicate('ancestor(*changeset)', safe=True)
217 217 def ancestor(repo, subset, x):
218 218 """A greatest common ancestor of the changesets.
219 219
220 220 Accepts 0 or more changesets.
221 221 Will return empty list when passed no args.
222 222 Greatest common ancestor of a single changeset is that changeset.
223 223 """
224 224 # i18n: "ancestor" is a keyword
225 225 l = getlist(x)
226 226 rl = fullreposet(repo)
227 227 anc = None
228 228
229 229 # (getset(repo, rl, i) for i in l) generates a list of lists
230 230 for revs in (getset(repo, rl, i) for i in l):
231 231 for r in revs:
232 232 if anc is None:
233 233 anc = repo[r]
234 234 else:
235 235 anc = anc.ancestor(repo[r])
236 236
237 237 if anc is not None and anc.rev() in subset:
238 238 return baseset([anc.rev()])
239 239 return baseset()
240 240
241 241 def _ancestors(repo, subset, x, followfirst=False):
242 242 heads = getset(repo, fullreposet(repo), x)
243 243 if not heads:
244 244 return baseset()
245 245 s = dagop.revancestors(repo, heads, followfirst)
246 246 return subset & s
247 247
248 248 @predicate('ancestors(set)', safe=True)
249 249 def ancestors(repo, subset, x):
250 250 """Changesets that are ancestors of a changeset in set.
251 251 """
252 252 return _ancestors(repo, subset, x)
253 253
254 254 @predicate('_firstancestors', safe=True)
255 255 def _firstancestors(repo, subset, x):
256 256 # ``_firstancestors(set)``
257 257 # Like ``ancestors(set)`` but follows only the first parents.
258 258 return _ancestors(repo, subset, x, followfirst=True)
259 259
260 260 def _childrenspec(repo, subset, x, n, order):
261 261 """Changesets that are the Nth child of a changeset
262 262 in set.
263 263 """
264 264 cs = set()
265 265 for r in getset(repo, fullreposet(repo), x):
266 266 for i in range(n):
267 267 c = repo[r].children()
268 268 if len(c) == 0:
269 269 break
270 270 if len(c) > 1:
271 271 raise error.RepoLookupError(
272 272 _("revision in set has more than one child"))
273 273 r = c[0].rev()
274 274 else:
275 275 cs.add(r)
276 276 return subset & cs
277 277
278 278 def ancestorspec(repo, subset, x, n, order):
279 279 """``set~n``
280 280 Changesets that are the Nth ancestor (first parents only) of a changeset
281 281 in set.
282 282 """
283 283 n = getinteger(n, _("~ expects a number"))
284 284 if n < 0:
285 285 # children lookup
286 286 return _childrenspec(repo, subset, x, -n, order)
287 287 ps = set()
288 288 cl = repo.changelog
289 289 for r in getset(repo, fullreposet(repo), x):
290 290 for i in range(n):
291 291 try:
292 292 r = cl.parentrevs(r)[0]
293 293 except error.WdirUnsupported:
294 294 r = repo[r].parents()[0].rev()
295 295 ps.add(r)
296 296 return subset & ps
297 297
298 298 @predicate('author(string)', safe=True)
299 299 def author(repo, subset, x):
300 300 """Alias for ``user(string)``.
301 301 """
302 302 # i18n: "author" is a keyword
303 303 n = getstring(x, _("author requires a string"))
304 304 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
305 305 return subset.filter(lambda x: matcher(repo[x].user()),
306 306 condrepr=('<user %r>', n))
307 307
308 308 @predicate('bisect(string)', safe=True)
309 309 def bisect(repo, subset, x):
310 310 """Changesets marked in the specified bisect status:
311 311
312 312 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
313 313 - ``goods``, ``bads`` : csets topologically good/bad
314 314 - ``range`` : csets taking part in the bisection
315 315 - ``pruned`` : csets that are goods, bads or skipped
316 316 - ``untested`` : csets whose fate is yet unknown
317 317 - ``ignored`` : csets ignored due to DAG topology
318 318 - ``current`` : the cset currently being bisected
319 319 """
320 320 # i18n: "bisect" is a keyword
321 321 status = getstring(x, _("bisect requires a string")).lower()
322 322 state = set(hbisect.get(repo, status))
323 323 return subset & state
324 324
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # Deprecated alias for bisect(); kept so old revsets keep working.
    return bisect(repo, subset, x)
330 330
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
    bms -= {node.nullrev}
    return subset & bms
365 365
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            # the branch cache cannot answer for the working directory
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
409 409
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
420 420
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        # changelog has no bundlerevs unless repo was opened on a bundle
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
432 432
def checkstatus(repo, subset, pat, field):
    """Helper for predicates filtering on changeset status lists.

    Filters ``subset`` to changesets where a file matching ``pat`` appears in
    the status list selected by ``field`` (the index into the tuple returned
    by ``repo.status()``, e.g. 0 for modified).
    """
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher; filesets depend on the context so they are rebuilt
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: the pattern is a single literal file name
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
464 464
def _children(repo, subset, parentset):
    """Return members of ``subset`` that have a parent in ``parentset``."""
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        # a child is always numbered above its parents
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)
481 481
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
489 489
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')
498 498
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain file name: direct manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
525 525
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))
548 548
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))
558 558
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))

    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    return subset.filter(lambda r: matcher(repo[r].description()),
                         condrepr=('<desc %r>', ds))
573 573
def _descendants(repo, subset, x, followfirst=False):
    """Common implementation of descendants()/_firstdescendants()."""
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = dagop.revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result
592 592
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
598 598
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
604 604
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
649 649
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
659 659
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
668 668
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
698 698
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
764 764
@predicate('first(set, [n])', safe=True, takeorder=True)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    return limit(repo, subset, x, order)
770 770
def _follow(repo, subset, x, name, followfirst=False):
    """Common implementation of follow()/_followfirst()."""
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
800 800
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')
809 809
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
816 816
@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
        files = [f for f in repo[rev] if m(f)]
        if len(files) != 1:
            # i18n: "followlines" is a keyword
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs
879 877
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any
887 885
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
908 906
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '':  # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
972 970
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
985 983
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)
997 995
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
1005 1003
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1014 1012
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1032 1030
@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    ls = os.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        return subset & ls
    return ls & subset
1054 1052
@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    ls.reverse()
    return ls & subset
1074 1072
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))
1089 1087
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1099 1097
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1119 1117
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))
1134 1132
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # field 0 of repo.status() is the list of modified files
    return checkstatus(repo, subset, pat, 0)
1146 1144
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= {node.nullrev}
    return subset & names
1183 1181
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex node: exact changelog lookup
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: must match exactly one node
        rn = None
        try:
            pm = repo.changelog._partialmatch(n)
            if pm is not None:
                rn = repo.changelog.rev(pm)
        except error.WdirUnsupported:
            rn = node.wdirrev

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1212 1210
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the cached set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1220 1218
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single argument: exclude every head that is neither a descendant
        # nor a member of the included set
        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1246 1244
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # follow the chain of recorded sources back to the earliest one
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # None marks revisions with no recorded source; drop them
    o = {_firstsrc(r) for r in dests}
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
1278 1276
@predicate('outgoing([path])', safe=False)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # suppress output produced during remote discovery
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = {cl.rev(r) for r in outgoing.missing}
    return subset & o
1305 1303
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[0])
        except error.WdirUnsupported:
            # wdir(): changelog has no entry, ask the context instead
            ps.add(repo[r].parents()[0].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1327 1325
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            # wdir(): changelog has no entry, ask the context instead.
            # parents() yields changectx objects, so store the revision
            # number — previously the context itself was added, polluting
            # the integer rev set (cf. parentspec(), which uses .rev()).
            parents = repo[r].parents()
            if len(parents) == 2:
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1355 1353
def parentpost(repo, subset, x, order):
    # handler for the postfix "set^" operator; equivalent to p1(set)
    return p1(repo, subset, x)
1358 1356
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # no argument: parents of the working directory
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # hoist bound methods out of the loop
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                # wdir(): changelog has no entry, ask the context instead
                up(p.rev() for p in repo[r].parents())
    ps -= {node.nullrev}
    return subset & ps
1378 1376
def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    return subset & repo._phasecache.getrevset(repo, targets)
1383 1381
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1391 1389
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1399 1397
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                # wdir(): changelog has no entry, ask the context instead
                ps.add(repo[r].parents()[0].rev())
        else:
            # n == 2: only merges contribute a second parent
            try:
                parents = cl.parentrevs(r)
                if parents[1] != node.nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
1432 1430
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # unknown revision: swallow the error, return nothing
        return baseset()
1446 1444
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    # any mutable phase, i.e. draft or secret
    getargs(x, 0, 0, "_notpublic takes no arguments")
    return _phase(repo, subset, phases.draft, phases.secret)
1452 1450
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    # evaluated lazily: one phase lookup per candidate revision
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)
1463 1461
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map it back locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1498 1496
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # status index 2 (removed) selects changesets removing matched files
    # i18n: "removes" is a keyword
    return checkstatus(repo, subset,
                       getstring(x, _("removes requires a pattern")), 2)
1510 1508
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev and wdirrev are valid even though not stored in the changelog
    if r not in repo.changelog and r not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([r])
1526 1524
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # stop early: the remaining fields are, by construction,
                    # at least as costly to evaluate and cannot change the
                    # outcome once one field differs
                    break
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1638 1636
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x)
    # only reverse when this expression defines the ordering
    if order == defineorder:
        revs.reverse()
    return revs
1647 1645
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        # keep r only if none of its non-null parents is in the set
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')
1660 1658
# map of sort key name -> function extracting that key from a changectx
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1669 1667
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    # split the spec into (key, reverse) pairs; a leading '-' means reverse
    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        # topo.firstbranch is only meaningful together with the topo key
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts
1706 1704
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    # no keys, or the ordering is imposed from outside: nothing to do
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: sorting by revision number needs no changectx
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1750 1748
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only changesets touching .hgsubstate can change a subrepo
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare subrepo state against the first parent to see
            # whether a matching subrepo actually changed
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1794 1792
def _substringmatcher(pattern, casesensitive=True):
    """Like util.stringmatcher(), but a 'literal' pattern matches as a
    substring instead of requiring full-string equality."""
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        # replace the equality matcher with a containment test
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher
1805 1803
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        # no argument: every tagged revision except the implicit 'tip' tag
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s
1833 1831
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # legacy alias for tag()
    return tag(repo, subset, x)
1837 1835
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # intersect with the cached set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
1846 1844
1847 1845
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # alias for author()
    return author(repo, subset, x)
1856 1854
@predicate('wdir()', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # a fullreposet implicitly covers the working directory revision
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
1865 1863
def _orderedlist(repo, subset, x):
    # Resolve a \0-separated list of revision symbols, preserving the
    # input order and dropping duplicates.
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # not a plain integer: resolve as a symbol
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is only implicitly a member of a fullreposet
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
1893 1891
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order != followorder:
        return _orderedlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedlist(repo, fullreposet(repo), x)
1902 1900
def _orderedintlist(repo, subset, x):
    # Parse a \0-separated list of integer revisions; keep those present
    # in subset, preserving the input order.
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    revs = [int(r) for r in spec.split('\0')]
    return baseset([r for r in revs if r in subset])
1910 1908
# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
1919 1917
def _orderedhexlist(repo, subset, x):
    # Parse a \0-separated list of full hex nodes; keep revisions present
    # in subset, preserving the input order.
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    revs = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in revs if r in subset])
1928 1926
# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
1937 1935
# parsed-tree node type -> evaluation function used by getset()
methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
1957 1955
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    pass
1961 1959
def match(ui, spec, repo=None, order=defineorder):
    """Create a matcher for a single revision spec

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    # single-spec convenience wrapper around matchany()
    return matchany(ui, [spec], repo=repo, order=order)
1969 1967
def matchany(ui, specs, repo=None, order=defineorder):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    if not specs:
        # no specs: matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        # multiple specs are combined as an "or" of the parsed trees
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    if ui:
        tree = revsetlang.expandaliases(ui, tree)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree, order)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
1999 1997
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        # default to the whole repository when no subset is given
        revs = fullreposet(repo) if subset is None else subset
        return getset(repo, revs, tree)
    return mfunc
2007 2005
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for symbolname, fn in registrarobj._table.iteritems():
        symbols[symbolname] = fn
        # record predicates that are safe for untrusted input
        if fn._safe:
            safesymbols.add(symbolname)
2015 2013
# load built-in predicates explicitly to setup safesymbols
# (fills both the symbols table and the safesymbols set)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now