##// END OF EJS Templates
context: add obsolete() method to basefilectx...
av6 -
r35087:a9454beb default
parent child Browse files
Show More
@@ -1,2604 +1,2606
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 revlog,
45 45 scmutil,
46 46 sparse,
47 47 subrepo,
48 48 util,
49 49 )
50 50
# Short alias used pervasively below for lazily-computed, cached attributes.
propertycache = util.propertycache

# Matches any byte outside the printable ASCII range (0x21-0x7f); used to
# decide whether a failed 20-byte lookup should be hex-encoded for display.
nonascii = re.compile(r'[^\x21-\x7f]').search
54 54
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context through is a no-op: return it as-is
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
429 429
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
442 442
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
708 708
709 709 class basefilectx(object):
710 710 """A filecontext object represents the common logic for its children:
711 711 filectx: read-only access to a filerevision that is already present
712 712 in the repo,
713 713 workingfilectx: a filecontext that represents files from the working
714 714 directory,
715 715 memfilectx: a filecontext that represents files in-memory,
716 716 overlayfilectx: duplicate another filecontext with some fields overridden.
717 717 """
718 718 @propertycache
719 719 def _filelog(self):
720 720 return self._repo.file(self._path)
721 721
722 722 @propertycache
723 723 def _changeid(self):
724 724 if r'_changeid' in self.__dict__:
725 725 return self._changeid
726 726 elif r'_changectx' in self.__dict__:
727 727 return self._changectx.rev()
728 728 elif r'_descendantrev' in self.__dict__:
729 729 # this file context was created from a revision with a known
730 730 # descendant, we can (lazily) correct for linkrev aliases
731 731 return self._adjustlinkrev(self._descendantrev)
732 732 else:
733 733 return self._filelog.linkrev(self._filerev)
734 734
735 735 @propertycache
736 736 def _filenode(self):
737 737 if r'_fileid' in self.__dict__:
738 738 return self._filelog.lookup(self._fileid)
739 739 else:
740 740 return self._changectx.filenode(self._path)
741 741
742 742 @propertycache
743 743 def _filerev(self):
744 744 return self._filelog.rev(self._filenode)
745 745
746 746 @propertycache
747 747 def _repopath(self):
748 748 return self._path
749 749
750 750 def __nonzero__(self):
751 751 try:
752 752 self._filenode
753 753 return True
754 754 except error.LookupError:
755 755 # file is missing
756 756 return False
757 757
758 758 __bool__ = __nonzero__
759 759
760 760 def __bytes__(self):
761 761 try:
762 762 return "%s@%s" % (self.path(), self._changectx)
763 763 except error.LookupError:
764 764 return "%s@???" % self.path()
765 765
766 766 __str__ = encoding.strmethod(__bytes__)
767 767
768 768 def __repr__(self):
769 769 return "<%s %s>" % (type(self).__name__, str(self))
770 770
771 771 def __hash__(self):
772 772 try:
773 773 return hash((self._path, self._filenode))
774 774 except AttributeError:
775 775 return id(self)
776 776
777 777 def __eq__(self, other):
778 778 try:
779 779 return (type(self) == type(other) and self._path == other._path
780 780 and self._filenode == other._filenode)
781 781 except AttributeError:
782 782 return False
783 783
784 784 def __ne__(self, other):
785 785 return not (self == other)
786 786
787 787 def filerev(self):
788 788 return self._filerev
789 789 def filenode(self):
790 790 return self._filenode
791 791 @propertycache
792 792 def _flags(self):
793 793 return self._changectx.flags(self._path)
794 794 def flags(self):
795 795 return self._flags
796 796 def filelog(self):
797 797 return self._filelog
798 798 def rev(self):
799 799 return self._changeid
800 800 def linkrev(self):
801 801 return self._filelog.linkrev(self._filerev)
802 802 def node(self):
803 803 return self._changectx.node()
804 804 def hex(self):
805 805 return self._changectx.hex()
806 806 def user(self):
807 807 return self._changectx.user()
808 808 def date(self):
809 809 return self._changectx.date()
810 810 def files(self):
811 811 return self._changectx.files()
812 812 def description(self):
813 813 return self._changectx.description()
814 814 def branch(self):
815 815 return self._changectx.branch()
816 816 def extra(self):
817 817 return self._changectx.extra()
818 818 def phase(self):
819 819 return self._changectx.phase()
820 820 def phasestr(self):
821 821 return self._changectx.phasestr()
822 def obsolete(self):
823 return self._changectx.obsolete()
822 824 def manifest(self):
823 825 return self._changectx.manifest()
824 826 def changectx(self):
825 827 return self._changectx
826 828 def renamed(self):
827 829 return self._copied
828 830 def repo(self):
829 831 return self._repo
830 832 def size(self):
831 833 return len(self.data())
832 834
833 835 def path(self):
834 836 return self._path
835 837
836 838 def isbinary(self):
837 839 try:
838 840 return util.binary(self.data())
839 841 except IOError:
840 842 return False
841 843 def isexec(self):
842 844 return 'x' in self.flags()
843 845 def islink(self):
844 846 return 'l' in self.flags()
845 847
846 848 def isabsent(self):
847 849 """whether this filectx represents a file not in self._changectx
848 850
849 851 This is mainly for merge code to detect change/delete conflicts. This is
850 852 expected to be True for all subclasses of basectx."""
851 853 return False
852 854
853 855 _customcmp = False
854 856 def cmp(self, fctx):
855 857 """compare with other file context
856 858
857 859 returns True if different than fctx.
858 860 """
859 861 if fctx._customcmp:
860 862 return fctx.cmp(self)
861 863
862 864 if (fctx._filenode is None
863 865 and (self._repo._encodefilterpats
864 866 # if file data starts with '\1\n', empty metadata block is
865 867 # prepended, which adds 4 bytes to filelog.size().
866 868 or self.size() - 4 == fctx.size())
867 869 or self.size() == fctx.size()):
868 870 return self._filelog.cmp(self._filenode, fctx.data())
869 871
870 872 return True
871 873
872 874 def _adjustlinkrev(self, srcrev, inclusive=False):
873 875 """return the first ancestor of <srcrev> introducing <fnode>
874 876
875 877 If the linkrev of the file revision does not point to an ancestor of
876 878 srcrev, we'll walk down the ancestors until we find one introducing
877 879 this file revision.
878 880
879 881 :srcrev: the changeset revision we search ancestors from
880 882 :inclusive: if true, the src revision will also be checked
881 883 """
882 884 repo = self._repo
883 885 cl = repo.unfiltered().changelog
884 886 mfl = repo.manifestlog
885 887 # fetch the linkrev
886 888 lkr = self.linkrev()
887 889 # hack to reuse ancestor computation when searching for renames
888 890 memberanc = getattr(self, '_ancestrycontext', None)
889 891 iteranc = None
890 892 if srcrev is None:
891 893 # wctx case, used by workingfilectx during mergecopy
892 894 revs = [p.rev() for p in self._repo[None].parents()]
893 895 inclusive = True # we skipped the real (revless) source
894 896 else:
895 897 revs = [srcrev]
896 898 if memberanc is None:
897 899 memberanc = iteranc = cl.ancestors(revs, lkr,
898 900 inclusive=inclusive)
899 901 # check if this linkrev is an ancestor of srcrev
900 902 if lkr not in memberanc:
901 903 if iteranc is None:
902 904 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
903 905 fnode = self._filenode
904 906 path = self._path
905 907 for a in iteranc:
906 908 ac = cl.read(a) # get changeset data (we avoid object creation)
907 909 if path in ac[3]: # checking the 'files' field.
908 910 # The file has been touched, check if the content is
909 911 # similar to the one we search for.
910 912 if fnode == mfl[ac[0]].readfast().get(path):
911 913 return a
912 914 # In theory, we should never get out of that loop without a result.
913 915 # But if manifest uses a buggy file revision (not children of the
914 916 # one it replaces) we could. Such a buggy situation will likely
915 917 # result is crash somewhere else at to some point.
916 918 return lkr
917 919
918 920 def introrev(self):
919 921 """return the rev of the changeset which introduced this file revision
920 922
921 923 This method is different from linkrev because it take into account the
922 924 changeset the filectx was created from. It ensures the returned
923 925 revision is one of its ancestors. This prevents bugs from
924 926 'linkrev-shadowing' when a file revision is used by multiple
925 927 changesets.
926 928 """
927 929 lkr = self.linkrev()
928 930 attrs = vars(self)
929 931 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
930 932 if noctx or self.rev() == lkr:
931 933 return self.linkrev()
932 934 return self._adjustlinkrev(self.rev(), inclusive=True)
933 935
934 936 def _parentfilectx(self, path, fileid, filelog):
935 937 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
936 938 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
937 939 if '_changeid' in vars(self) or '_changectx' in vars(self):
938 940 # If self is associated with a changeset (probably explicitly
939 941 # fed), ensure the created filectx is associated with a
940 942 # changeset that is an ancestor of self.changectx.
941 943 # This lets us later use _adjustlinkrev to get a correct link.
942 944 fctx._descendantrev = self.rev()
943 945 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
944 946 elif '_descendantrev' in vars(self):
945 947 # Otherwise propagate _descendantrev if we have one associated.
946 948 fctx._descendantrev = self._descendantrev
947 949 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
948 950 return fctx
949 951
950 952 def parents(self):
951 953 _path = self._path
952 954 fl = self._filelog
953 955 parents = self._filelog.parents(self._filenode)
954 956 pl = [(_path, node, fl) for node in parents if node != nullid]
955 957
956 958 r = fl.renamed(self._filenode)
957 959 if r:
958 960 # - In the simple rename case, both parent are nullid, pl is empty.
959 961 # - In case of merge, only one of the parent is null id and should
960 962 # be replaced with the rename information. This parent is -always-
961 963 # the first one.
962 964 #
963 965 # As null id have always been filtered out in the previous list
964 966 # comprehension, inserting to 0 will always result in "replacing
965 967 # first nullid parent with rename information.
966 968 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
967 969
968 970 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
969 971
970 972 def p1(self):
971 973 return self.parents()[0]
972 974
973 975 def p2(self):
974 976 p = self.parents()
975 977 if len(p) == 2:
976 978 return p[1]
977 979 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
978 980
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # count lines; a trailing chunk without a newline still counts
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                # one annotateline per line, carrying its 1-based line number
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        # memoize filelog lookups across the whole walk
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache (parents of each visited filectx)
        # and needed (reference counts used to free annotate data early)
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                # drop parent annotate data as soon as no other child
                # still needs it, to bound memory use
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

            hist[f] = curr
            del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1092 1094
1093 1095 def ancestors(self, followfirst=False):
1094 1096 visit = {}
1095 1097 c = self
1096 1098 if followfirst:
1097 1099 cut = 1
1098 1100 else:
1099 1101 cut = None
1100 1102
1101 1103 while True:
1102 1104 for parent in c.parents()[:cut]:
1103 1105 visit[(parent.linkrev(), parent.filenode())] = parent
1104 1106 if not visit:
1105 1107 break
1106 1108 c = visit.pop(max(visit))
1107 1109 yield c
1108 1110
1109 1111 def decodeddata(self):
1110 1112 """Returns `data()` after running repository decoding filters.
1111 1113
1112 1114 This is often equivalent to how the data would be expressed on disk.
1113 1115 """
1114 1116 return self._repo.wwritedata(self.path(), self.data())
1115 1117
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable per-line annotate record produced by annotate()."""
    # filectx of the revision that introduced this line
    fctx = attr.ib()
    # 1-based line number at first appearance, or False when not tracked
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1122 1124
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    # pair each parent's annotate data with its diff blocks against the child
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        # diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # parent hunk is at least as long: map child lines 1:1
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # parent hunk is shorter: defer to the repeat-last pass
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1183 1185
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor is required to locate the file revision
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-populate the propertycaches for values actually given
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """return the raw revlog payload, without flag processing"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file content at this revision

        Raises Abort for a censored node unless censor.policy is 'ignore',
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """return the size of the file content at this revision"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # linkrev points elsewhere: only report the copy if neither parent
        # of the changeset already contains this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """return filectxs for the filelog children of this file revision"""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1289 1291
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # text: commit message; user/date/changes pre-populate the
        # corresponding propertycaches only when supplied
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # first parent's id with a '+' suffix marks an uncommitted context
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazily computed repository status when not given at init time
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """return the sorted list of files touched by this context"""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """return the union of the parents' bookmarks"""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """return the highest phase among the parents (at least draft)"""
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """return the flags ('l', 'x' or '') of path, '' on lookup errors"""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """yield the parents, then every changelog ancestor of them"""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1490 1492
1491 1493 class workingctx(committablectx):
1492 1494 """A workingctx object makes access to data related to
1493 1495 the current working directory convenient.
1494 1496 date - any valid date string or (unixtime, offset), or None.
1495 1497 user - username string, or None.
1496 1498 extra - a dictionary of extra values, or None.
1497 1499 changes - a list of file lists as returned by localrepo.status()
1498 1500 or None to use the repository status.
1499 1501 """
1500 1502 def __init__(self, repo, text="", user=None, date=None, extra=None,
1501 1503 changes=None):
1502 1504 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1503 1505
1504 1506 def __iter__(self):
1505 1507 d = self._repo.dirstate
1506 1508 for f in d:
1507 1509 if d[f] != 'r':
1508 1510 yield f
1509 1511
1510 1512 def __contains__(self, key):
1511 1513 return self._repo.dirstate[key] not in "?r"
1512 1514
1513 1515 def hex(self):
1514 1516 return hex(wdirid)
1515 1517
1516 1518 @propertycache
1517 1519 def _parents(self):
1518 1520 p = self._repo.dirstate.parents()
1519 1521 if p[1] == nullid:
1520 1522 p = p[:-1]
1521 1523 return [changectx(self._repo, x) for x in p]
1522 1524
1523 1525 def filectx(self, path, filelog=None):
1524 1526 """get a file context from the working directory"""
1525 1527 return workingfilectx(self._repo, path, workingctx=self,
1526 1528 filelog=filelog)
1527 1529
1528 1530 def dirty(self, missing=False, merge=True, branch=True):
1529 1531 "check whether a working directory is modified"
1530 1532 # check subrepos first
1531 1533 for s in sorted(self.substate):
1532 1534 if self.sub(s).dirty(missing=missing):
1533 1535 return True
1534 1536 # check current working dir
1535 1537 return ((merge and self.p2()) or
1536 1538 (branch and self.branch() != self.p1().branch()) or
1537 1539 self.modified() or self.added() or self.removed() or
1538 1540 (missing and self.deleted()))
1539 1541
1540 1542 def add(self, list, prefix=""):
1541 1543 with self._repo.wlock():
1542 1544 ui, ds = self._repo.ui, self._repo.dirstate
1543 1545 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1544 1546 rejected = []
1545 1547 lstat = self._repo.wvfs.lstat
1546 1548 for f in list:
1547 1549 # ds.pathto() returns an absolute file when this is invoked from
1548 1550 # the keyword extension. That gets flagged as non-portable on
1549 1551 # Windows, since it contains the drive letter and colon.
1550 1552 scmutil.checkportable(ui, os.path.join(prefix, f))
1551 1553 try:
1552 1554 st = lstat(f)
1553 1555 except OSError:
1554 1556 ui.warn(_("%s does not exist!\n") % uipath(f))
1555 1557 rejected.append(f)
1556 1558 continue
1557 1559 if st.st_size > 10000000:
1558 1560 ui.warn(_("%s: up to %d MB of RAM may be required "
1559 1561 "to manage this file\n"
1560 1562 "(use 'hg revert %s' to cancel the "
1561 1563 "pending addition)\n")
1562 1564 % (f, 3 * st.st_size // 1000000, uipath(f)))
1563 1565 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1564 1566 ui.warn(_("%s not added: only files and symlinks "
1565 1567 "supported currently\n") % uipath(f))
1566 1568 rejected.append(f)
1567 1569 elif ds[f] in 'amn':
1568 1570 ui.warn(_("%s already tracked!\n") % uipath(f))
1569 1571 elif ds[f] == 'r':
1570 1572 ds.normallookup(f)
1571 1573 else:
1572 1574 ds.add(f)
1573 1575 return rejected
1574 1576
1575 1577 def forget(self, files, prefix=""):
1576 1578 with self._repo.wlock():
1577 1579 ds = self._repo.dirstate
1578 1580 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1579 1581 rejected = []
1580 1582 for f in files:
1581 1583 if f not in self._repo.dirstate:
1582 1584 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1583 1585 rejected.append(f)
1584 1586 elif self._repo.dirstate[f] != 'a':
1585 1587 self._repo.dirstate.remove(f)
1586 1588 else:
1587 1589 self._repo.dirstate.drop(f)
1588 1590 return rejected
1589 1591
1590 1592 def undelete(self, list):
1591 1593 pctxs = self.parents()
1592 1594 with self._repo.wlock():
1593 1595 ds = self._repo.dirstate
1594 1596 for f in list:
1595 1597 if self._repo.dirstate[f] != 'r':
1596 1598 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1597 1599 else:
1598 1600 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1599 1601 t = fctx.data()
1600 1602 self._repo.wwrite(f, t, fctx.flags())
1601 1603 self._repo.dirstate.normal(f)
1602 1604
1603 1605 def copy(self, source, dest):
1604 1606 try:
1605 1607 st = self._repo.wvfs.lstat(dest)
1606 1608 except OSError as err:
1607 1609 if err.errno != errno.ENOENT:
1608 1610 raise
1609 1611 self._repo.ui.warn(_("%s does not exist!\n")
1610 1612 % self._repo.dirstate.pathto(dest))
1611 1613 return
1612 1614 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1613 1615 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1614 1616 "symbolic link\n")
1615 1617 % self._repo.dirstate.pathto(dest))
1616 1618 else:
1617 1619 with self._repo.wlock():
1618 1620 if self._repo.dirstate[dest] in '?':
1619 1621 self._repo.dirstate.add(dest)
1620 1622 elif self._repo.dirstate[dest] in 'r':
1621 1623 self._repo.dirstate.normallookup(dest)
1622 1624 self._repo.dirstate.copy(source, dest)
1623 1625
1624 1626 def match(self, pats=None, include=None, exclude=None, default='glob',
1625 1627 listsubrepos=False, badfn=None):
1626 1628 r = self._repo
1627 1629
1628 1630 # Only a case insensitive filesystem needs magic to translate user input
1629 1631 # to actual case in the filesystem.
1630 1632 icasefs = not util.fscasesensitive(r.root)
1631 1633 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1632 1634 default, auditor=r.auditor, ctx=self,
1633 1635 listsubrepos=listsubrepos, badfn=badfn,
1634 1636 icasefs=icasefs)
1635 1637
1636 1638 def flushall(self):
1637 1639 pass # For overlayworkingfilectx compatibility.
1638 1640
1639 1641 def _filtersuspectsymlink(self, files):
1640 1642 if not files or self._repo.dirstate._checklink:
1641 1643 return files
1642 1644
1643 1645 # Symlink placeholders may get non-symlink-like contents
1644 1646 # via user error or dereferencing by NFS or Samba servers,
1645 1647 # so we filter out any placeholders that don't look like a
1646 1648 # symlink
1647 1649 sane = []
1648 1650 for f in files:
1649 1651 if self.flags(f) == 'l':
1650 1652 d = self[f].data()
1651 1653 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1652 1654 self._repo.ui.debug('ignoring suspect symlink placeholder'
1653 1655 ' "%s"\n' % f)
1654 1656 continue
1655 1657 sane.append(f)
1656 1658 return sane
1657 1659
1658 1660 def _checklookup(self, files):
1659 1661 # check for any possibly clean files
1660 1662 if not files:
1661 1663 return [], [], []
1662 1664
1663 1665 modified = []
1664 1666 deleted = []
1665 1667 fixup = []
1666 1668 pctx = self._parents[0]
1667 1669 # do a full compare of any files that might have changed
1668 1670 for f in sorted(files):
1669 1671 try:
1670 1672 # This will return True for a file that got replaced by a
1671 1673 # directory in the interim, but fixing that is pretty hard.
1672 1674 if (f not in pctx or self.flags(f) != pctx.flags(f)
1673 1675 or pctx[f].cmp(self[f])):
1674 1676 modified.append(f)
1675 1677 else:
1676 1678 fixup.append(f)
1677 1679 except (IOError, OSError):
1678 1680 # A file become inaccessible in between? Mark it as deleted,
1679 1681 # matching dirstate behavior (issue5584).
1680 1682 # The dirstate has more complex behavior around whether a
1681 1683 # missing file matches a directory, etc, but we don't need to
1682 1684 # bother with that: if f has made it to this point, we're sure
1683 1685 # it's in the dirstate.
1684 1686 deleted.append(f)
1685 1687
1686 1688 return modified, deleted, fixup
1687 1689
1688 1690 def _poststatusfixup(self, status, fixup):
1689 1691 """update dirstate for files that are actually clean"""
1690 1692 poststatus = self._repo.postdsstatus()
1691 1693 if fixup or poststatus:
1692 1694 try:
1693 1695 oldid = self._repo.dirstate.identity()
1694 1696
1695 1697 # updating the dirstate is optional
1696 1698 # so we don't wait on the lock
1697 1699 # wlock can invalidate the dirstate, so cache normal _after_
1698 1700 # taking the lock
1699 1701 with self._repo.wlock(False):
1700 1702 if self._repo.dirstate.identity() == oldid:
1701 1703 if fixup:
1702 1704 normal = self._repo.dirstate.normal
1703 1705 for f in fixup:
1704 1706 normal(f)
1705 1707 # write changes out explicitly, because nesting
1706 1708 # wlock at runtime may prevent 'wlock.release()'
1707 1709 # after this block from doing so for subsequent
1708 1710 # changing files
1709 1711 tr = self._repo.currenttransaction()
1710 1712 self._repo.dirstate.write(tr)
1711 1713
1712 1714 if poststatus:
1713 1715 for ps in poststatus:
1714 1716 ps(self, status)
1715 1717 else:
1716 1718 # in this case, writing changes out breaks
1717 1719 # consistency, because .hg/dirstate was
1718 1720 # already changed simultaneously after last
1719 1721 # caching (see also issue5584 for detail)
1720 1722 self._repo.ui.debug('skip updating dirstate: '
1721 1723 'identity mismatch\n')
1722 1724 except error.LockError:
1723 1725 pass
1724 1726 finally:
1725 1727 # Even if the wlock couldn't be grabbed, clear out the list.
1726 1728 self._repo.clearpostdsstatus()
1727 1729
1728 1730 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1729 1731 '''Gets the status from the dirstate -- internal use only.'''
1730 1732 subrepos = []
1731 1733 if '.hgsub' in self:
1732 1734 subrepos = sorted(self.substate)
1733 1735 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1734 1736 clean=clean, unknown=unknown)
1735 1737
1736 1738 # check for any possibly clean files
1737 1739 fixup = []
1738 1740 if cmp:
1739 1741 modified2, deleted2, fixup = self._checklookup(cmp)
1740 1742 s.modified.extend(modified2)
1741 1743 s.deleted.extend(deleted2)
1742 1744
1743 1745 if fixup and clean:
1744 1746 s.clean.extend(fixup)
1745 1747
1746 1748 self._poststatusfixup(s, fixup)
1747 1749
1748 1750 if match.always():
1749 1751 # cache for performance
1750 1752 if s.unknown or s.ignored or s.clean:
1751 1753 # "_status" is cached with list*=False in the normal route
1752 1754 self._status = scmutil.status(s.modified, s.added, s.removed,
1753 1755 s.deleted, [], [], [])
1754 1756 else:
1755 1757 self._status = s
1756 1758
1757 1759 return s
1758 1760
1759 1761 @propertycache
1760 1762 def _manifest(self):
1761 1763 """generate a manifest corresponding to the values in self._status
1762 1764
1763 1765 This reuse the file nodeid from parent, but we use special node
1764 1766 identifiers for added and modified files. This is used by manifests
1765 1767 merge to see that files are different and by update logic to avoid
1766 1768 deleting newly added files.
1767 1769 """
1768 1770 return self._buildstatusmanifest(self._status)
1769 1771
1770 1772 def _buildstatusmanifest(self, status):
1771 1773 """Builds a manifest that includes the given status results."""
1772 1774 parents = self.parents()
1773 1775
1774 1776 man = parents[0].manifest().copy()
1775 1777
1776 1778 ff = self._flagfunc
1777 1779 for i, l in ((addednodeid, status.added),
1778 1780 (modifiednodeid, status.modified)):
1779 1781 for f in l:
1780 1782 man[f] = i
1781 1783 try:
1782 1784 man.setflag(f, ff(f))
1783 1785 except OSError:
1784 1786 pass
1785 1787
1786 1788 for f in status.deleted + status.removed:
1787 1789 if f in man:
1788 1790 del man[f]
1789 1791
1790 1792 return man
1791 1793
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # note: the incoming 's' is never read -- it is rebuilt from the
        # dirstate before first use
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: not comparing against the dirstate parent, so
            # delegate to the base class for a full manifest comparison
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1811 1813
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.

        Note: mutates the caller-supplied matcher in place (match.bad).
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1831 1833
    def markcommitted(self, node):
        """Mark this working context as committed as *node*.

        Extends the base behavior by notifying the sparse module of the
        new commit (sparse.aftercommit).
        """
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1836 1838
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not committed yet, so there is no changelog revision to point at
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog and ctx are optional; subclasses compute them lazily
        # (e.g. via propertycache) when not supplied here
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "not present in this manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # renamed() yields (srcpath, srcnode); the filelog is unknown
            # here, hence the trailing None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (node == nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no descendants yet
        return []
1883 1885
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh workingctx when none was passed to __init__
        return workingctx(self._repo)

    def data(self):
        """Return the file content as read from the working directory."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (copy source path, source filenode) or None.

        The source node is looked up in the first parent's manifest and
        falls back to nullid when absent there.
        """
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        # lstat so symlinks report the link size, not the target's
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tz); falls back to the changectx's timestamp when
        the file is missing from disk (ENOENT)."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # path sanity check via the working directory vfs auditor
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only record the copy when the dirstate tracks the file
        # ("nma" states)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory is squatting on our path; remove it
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            # a file/symlink is squatting on one of our ancestor directories
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1961 1963
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be flushed
    at a later time.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """Return the (possibly cached) content of ``path``.

        Raises ProgrammingError when the cache records the file as deleted.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # fix: this object is a change context and has no '_path'
                # attribute, so the previous "%s" % self._path interpolation
                # raised AttributeError instead of the intended error;
                # report the 'path' argument instead.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def isinmemory(self):
        # distinguishes this overlay from an on-disk working context
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # fix: use the 'path' argument, not the nonexistent
                # 'self._path' (see data()).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # fix: use the 'path' argument, not the nonexistent
                # 'self._path' (see data()).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """Write every cached entry through to the wrapped context, in
        first-write order, then reset the cache."""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    # flags-only change: content stays as in wrappedctx
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): filectx.remove() takes 'ignoremissing', not a
                # path; passing 'path' makes ignoremissing truthy. Left as-is
                # to preserve behavior -- confirm intent.
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        return path in self._cache

    def _clean(self):
        # cache maps path -> entry dict; _writeorder preserves first-write
        # order for flushall()
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        if path not in self._cache:
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2097 2099
class overlayworkingfilectx(workingfilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # parent is the owning overlayworkingctx; reads and writes delegate
        # to its cache
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content-only comparison; True means "different"
        return self.data() != fctx.data()

    def ctx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # nothing to audit for an in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        # backgroundclose is accepted for signature compatibility but is
        # irrelevant for the in-memory cache
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)
2152 2154
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # note: super(workingctx, ...) deliberately skips
        # workingctx.__init__ so that 'changes' fixes the status at
        # construction time
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # every manifest file this commit does not touch counts as clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2188 2190
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes per path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2204 2206
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a (path, node) pair (or None); memfilectx only
        # wants the source path, so unpack the first element when present
        copysource = fctx.renamed()
        if copysource:
            copysource = copysource[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2223 2225
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copysource, memctx=memctx)

    return getfilectx
2239 2241
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patchstore or a non-callable context-like store by
        # adapting them to the filectxfn callable protocol
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # derive the file node from content plus parent file nodes
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no parent file nodes
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn produced a file context: present => modified
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2362 2364
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # manifest-style flags string: 'l' for symlink, 'x' for executable
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # node part is nullid; repo._filecommit recomputes the copy rev
            self._copied = (copied, nullid)

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        # NOTE(review): memctx does not appear to implement __delitem__, so
        # this likely raises -- confirm before relying on remove()
        del self._changectx[self._path]

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite

        ``flags`` and ``backgroundclose`` are accepted for signature
        compatibility with the other committablefilectx subclasses
        (workingfilectx, overlayworkingfilectx) and are ignored here.
        """
        self._data = data
2395 2397
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctx was not overridden, so it trivially matches
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # lazily computed: either the override or originalfctx.data
        return self._datafunc()
2466 2468
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # bypass basectx.__new__'s changeid handling; only repo matters here
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null changectxs so _parents always has length 2
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): 'p1'/'p2' are changectx objects compared against the
        # raw nullid bytes -- this relies on changectx equality semantics;
        # confirm it matches the intent (cf. p1.node() != nullid)
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents come straight from the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2564 2566
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the disk-backed fast path when neither side is a symlink.
        eitherlink = 'l' in self.flags() or 'l' in fctx.flags()
        if not eitherlink and self._repo and isinstance(fctx, workingfilectx):
            # Add a fast-path for merge if both sides are disk-backed.
            # filecmp returns True when files are identical -- the opposite
            # of our cmp convention (True means different) -- hence the not.
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        assert not flags
        with open(self._path, "w") as fp:
            fp.write(data)
@@ -1,643 +1,644
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import difflib
13 13 import os
14 14 import re
15 15
16 16 from ..i18n import _
17 17 from ..node import hex, nullid, short
18 18
19 19 from .common import (
20 20 ErrorResponse,
21 21 HTTP_BAD_REQUEST,
22 22 HTTP_NOT_FOUND,
23 23 paritygen,
24 24 )
25 25
26 26 from .. import (
27 27 context,
28 28 error,
29 29 match,
30 30 mdiff,
31 31 patch,
32 32 pathutil,
33 33 pycompat,
34 34 templatefilters,
35 35 ui as uimod,
36 36 util,
37 37 )
38 38
def up(p):
    """return the parent directory of path *p*, '/'-terminated

    The path is normalized to start with '/', a trailing '/' is ignored,
    and the root ('/') is its own parent.
    """
    # guard: an empty path has no components; treat it as the root
    # (the original indexed p[0] and raised IndexError on "")
    if not p:
        return "/"
    if p[0] != "/":
        p = "/" + p
    if p[-1] == "/":
        p = p[:-1]
    up = os.path.dirname(p)
    if up == "/":
        return "/"
    return up + "/"
48 48
49 49 def _navseq(step, firststep=None):
50 50 if firststep:
51 51 yield firststep
52 52 if firststep >= 20 and firststep <= 40:
53 53 firststep = 50
54 54 yield firststep
55 55 assert step > 0
56 56 assert firststep > 0
57 57 while step <= firststep:
58 58 step *= 10
59 59 while True:
60 60 yield 1 * step
61 61 yield 3 * step
62 62 step *= 10
63 63
class revnav(object):
    """generator of navigation links over the changelog"""

    def __init__(self, repo):
        """Navigation generation object

        :repo: repo object we generate nav for
        """
        # used for hex generation
        self._revlog = repo.changelog

    def __nonzero__(self):
        """return True if any revision to navigate over"""
        return self._first() is not None

    __bool__ = __nonzero__

    def _first(self):
        """return the minimum non-filtered changeset or None"""
        for rev in self._revlog:
            return rev
        return None

    def hex(self, rev):
        """hex node id of *rev*"""
        return hex(self._revlog.node(rev))

    def gen(self, pos, pagelen, limit):
        """computes label and revision id for navigation link

        :pos: is the revision relative to which we generate navigation.
        :pagelen: the size of each navigation page
        :limit: how far shall we link

        The return is:
            - a single element tuple
            - containing a dictionary with a `before` and `after` key
            - values are generator functions taking arbitrary number of kwargs
            - yield items are dictionaries with `label` and `node` keys
        """
        if not self:
            # empty repo
            return ({'before': (), 'after': ()},)

        # candidate revisions at increasing distances on both sides of pos
        candidates = []
        for offset in _navseq(1, pagelen):
            if offset > limit:
                break
            candidates.append(pos + offset)
            candidates.append(pos - offset)
        candidates.sort()

        first = self._first()
        navbefore = [("(%i)" % first, self.hex(first))]
        navafter = []
        for rev in candidates:
            if rev not in self._revlog:
                continue
            if pos < rev < limit:
                navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
            if 0 < rev < pos:
                navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))

        navafter.append(("tip", "tip"))

        def entry(item):
            return {"label": item[0], "node": item[1]}

        return ({'before': lambda **map: (entry(i) for i in navbefore),
                 'after': lambda **map: (entry(i) for i in navafter)},)
132 132
class filerevnav(revnav):
    # Navigation over a single file's history: revisions are filelog
    # revisions, but displayed node ids must come from the changelog.

    def __init__(self, repo, path):
        """Navigation generation object

        :repo: repo object we generate nav for
        :path: path of the file we generate nav for
        """
        # used for iteration
        self._changelog = repo.unfiltered().changelog
        # used for hex generation
        self._revlog = repo.file(path)

    def hex(self, rev):
        # map the filelog revision back to its introducing changeset via
        # linkrev, then render that changelog node
        return hex(self._changelog.node(self._revlog.linkrev(rev)))
148 148
149 149 class _siblings(object):
150 150 def __init__(self, siblings=None, hiderev=None):
151 151 if siblings is None:
152 152 siblings = []
153 153 self.siblings = [s for s in siblings if s.node() != nullid]
154 154 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
155 155 self.siblings = []
156 156
157 157 def __iter__(self):
158 158 for s in self.siblings:
159 159 d = {
160 160 'node': s.hex(),
161 161 'rev': s.rev(),
162 162 'user': s.user(),
163 163 'date': s.date(),
164 164 'description': s.description(),
165 165 'branch': s.branch(),
166 166 }
167 167 if util.safehasattr(s, 'path'):
168 168 d['file'] = s.path()
169 169 yield d
170 170
171 171 def __len__(self):
172 172 return len(self.siblings)
173 173
def difffeatureopts(req, ui, section):
    """build diff options for *section*, letting whitespace-related query
    parameters override the configured values

    An unparsable query value counts as "enabled".
    """
    diffopts = patch.difffeatureopts(ui, untrusted=True,
                                     section=section, whitespace=True)

    for name in ('ignorews', 'ignorewsamount', 'ignorewseol',
                 'ignoreblanklines'):
        value = req.form.get(name, [None])[0]
        if value is None:
            continue
        parsed = util.parsebool(value)
        if parsed is None:
            parsed = True
        setattr(diffopts, name, parsed)

    return diffopts
185 185
def annotate(req, fctx, ui):
    """annotate *fctx*, honoring diff options from the request"""
    return fctx.annotate(follow=True, linenumber=True,
                         diffopts=difffeatureopts(req, ui, 'annotate'))
189 189
def parents(ctx, hide=None):
    """return template-ready parents of *ctx*

    For a file context whose changeset is not the one that introduced the
    file revision, the introducing changeset is shown instead of the
    filelog parents.
    """
    if isinstance(ctx, context.basefilectx):
        intro = ctx.introrev()
        if intro != ctx.changectx().rev():
            return _siblings([ctx.repo()[intro]], hide)
    return _siblings(ctx.parents(), hide)
196 196
def children(ctx, hide=None):
    """return template-ready children of *ctx*"""
    return _siblings(siblings=ctx.children(), hiderev=hide)
199 199
def renamelink(fctx):
    """return a one-element template list describing where *fctx* was
    renamed from, or an empty list when it was not renamed"""
    renamed = fctx.renamed()
    if not renamed:
        return []
    return [{'file': renamed[0], 'node': hex(renamed[1])}]
205 205
def nodetagsdict(repo, node):
    """return the tags of *node* as a list of template mappings"""
    return [{"name": tag} for tag in repo.nodetags(node)]
208 208
def nodebookmarksdict(repo, node):
    """return the bookmarks of *node* as a list of template mappings"""
    return [{"name": mark} for mark in repo.nodebookmarks(node)]
211 211
def nodebranchdict(repo, ctx):
    """return [{'name': branch}] when *ctx* is the tip of its branch,
    otherwise an empty list"""
    branch = ctx.branch()
    # If this is an empty repo, ctx.node() == nullid,
    # ctx.branch() == 'default'.
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    if tipnode == ctx.node():
        return [{"name": branch}]
    return []
224 224
def nodeinbranch(repo, ctx):
    """return [{'name': branch}] when *ctx* sits on a non-default branch
    without being that branch's tip, otherwise an empty list"""
    branch = ctx.branch()
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    if branch != 'default' and tipnode != ctx.node():
        return [{"name": branch}]
    return []
235 235
def nodebranchnodefault(ctx):
    """return [{'name': branch}] unless *ctx* is on the default branch"""
    branch = ctx.branch()
    if branch == 'default':
        return []
    return [{"name": branch}]
242 242
def showtag(repo, tmpl, t1, node=nullid, **args):
    """expand template *t1* once per tag on *node*"""
    for tagname in repo.nodetags(node):
        yield tmpl(t1, tag=tagname, **args)
246 246
def showbookmark(repo, tmpl, t1, node=nullid, **args):
    """expand template *t1* once per bookmark on *node*"""
    for mark in repo.nodebookmarks(node):
        yield tmpl(t1, bookmark=mark, **args)
250 250
def branchentries(repo, stripecount, limit=0):
    """return a template generator producing one mapping per branch

    Branches are sorted with open branches first, then by descending tip
    revision.  A positive *limit* caps the number of entries yielded.
    """
    tips = []
    heads = repo.heads()
    parity = paritygen(stripecount)
    # sort key: (is-closed, tip rev); combined with reverse=True below this
    # puts open branches first, newest tips first within each group
    sortkey = lambda item: (not item[1], item[0].rev())

    def entries(**map):
        count = 0
        if not tips:
            # lazily collect (tipctx, closed) pairs on first expansion
            for tag, hs, tip, closed in repo.branchmap().iterbranches():
                tips.append((repo[tip], closed))
        for ctx, closed in sorted(tips, key=sortkey, reverse=True):
            if limit > 0 and count >= limit:
                return
            count += 1
            if closed:
                status = 'closed'
            elif ctx.node() not in heads:
                # the branch tip is an ancestor of some head: inactive
                status = 'inactive'
            else:
                status = 'open'
            yield {
                'parity': next(parity),
                'branch': ctx.branch(),
                'status': status,
                'node': ctx.hex(),
                'date': ctx.date()
            }

    return entries
281 281
def cleanpath(repo, path):
    """canonicalize *path* (relative to the repo root), stripping any
    leading slashes"""
    return pathutil.canonpath(repo.root, '', path.lstrip('/'))
285 285
def changeidctx(repo, changeid):
    """return the changectx for *changeid*, falling back to interpreting
    it as a manifest node id"""
    try:
        return repo[changeid]
    except error.RepoError:
        # not a changeset id; resolve it through the manifest revlog
        man = repo.manifestlog._revlog
        return repo[man.linkrev(man.rev(man.lookup(changeid)))]
294 294
def changectx(repo, req):
    """find the changectx the request refers to, defaulting to tip

    The 'node' form value wins over 'manifest'; for a range like 'a:b'
    only the part after the first ':' is used.
    """
    changeid = "tip"
    if 'node' in req.form:
        # a revision range selects its end revision
        changeid = req.form['node'][0].split(':', 1)[-1]
    elif 'manifest' in req.form:
        changeid = req.form['manifest'][0]

    return changeidctx(repo, changeid)
306 306
def basechangectx(repo, req):
    """return the base changectx of a comparison request, i.e. the part
    before ':' in the 'node' form value, or None when no node was given"""
    if 'node' not in req.form:
        return None
    changeid = req.form['node'][0].split(':', 1)[0]
    return changeidctx(repo, changeid)
316 316
def filectx(repo, req):
    """find the file context the request refers to

    Raises ErrorResponse(HTTP_NOT_FOUND) when the 'file' parameter or
    both 'node' and 'filenode' are missing.
    """
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = cleanpath(repo, req.form['file'][0])
    if 'node' in req.form:
        changeid = req.form['node'][0]
    elif 'filenode' in req.form:
        changeid = req.form['filenode'][0]
    else:
        raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
    # prefer interpreting the id as a changeset; fall back to a file node
    try:
        return repo[changeid][path]
    except error.RepoError:
        return repo.filectx(path, fileid=changeid)
333 333
def linerange(req):
    """parse the 'linerange' request parameter into a line range tuple,
    or return None when the parameter is absent

    Raises ErrorResponse(HTTP_BAD_REQUEST) for repeated or malformed
    values.
    """
    values = req.form.get('linerange')
    if values is None:
        return None
    if len(values) > 1:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'redundant linerange parameter')
    try:
        fromline, toline = map(int, values[0].split(':', 1))
    except ValueError:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'invalid linerange parameter')
    try:
        return util.processlinerange(fromline, toline)
    except error.ParseError as exc:
        raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
350 350
def formatlinerange(fromline, toline):
    """render a 0-based half-open line range as the 1-based 'start:end'
    form used in URLs"""
    start = fromline + 1
    return '%d:%d' % (start, toline)
353 353
def commonentry(repo, ctx):
    """return the template keywords shared by the changeset-like views

    The mapping mixes eagerly computed values with callables
    ('parent'/'child') that templates expand lazily.
    """
    node = ctx.node()
    return {
        'rev': ctx.rev(),
        'node': hex(node),
        'author': ctx.user(),
        'desc': ctx.description(),
        'date': ctx.date(),
        'extra': ctx.extra(),
        'phase': ctx.phasestr(),
        # whether the changeset has been obsoleted by a newer version
        'obsolete': ctx.obsolete(),
        'branch': nodebranchnodefault(ctx),
        'inbranch': nodeinbranch(repo, ctx),
        'branches': nodebranchdict(repo, ctx),
        'tags': nodetagsdict(repo, node),
        'bookmarks': nodebookmarksdict(repo, node),
        # lazy: parent/child computation may be costly
        'parent': lambda **x: parents(ctx),
        'child': lambda **x: children(ctx),
    }
372 373
def changelistentry(web, ctx, tmpl):
    '''Obtain a dictionary to be used for entries in a changelist.

    This function is called when producing items for the "entries" list passed
    to the "shortlog" and "changelog" templates.
    '''
    repo = web.repo
    rev = ctx.rev()
    node = ctx.node()
    tagentries = showtag(repo, tmpl, 'changelogtag', node)
    filelinks = listfilediffs(tmpl, ctx.files(), node, web.maxfiles)

    entry = commonentry(repo, ctx)
    entry.update({
        'allparents': lambda **x: parents(ctx),
        # hide the linear parent/child to keep the listing compact
        'parent': lambda **x: parents(ctx, rev - 1),
        'child': lambda **x: children(ctx, rev + 1),
        'changelogtag': tagentries,
        'files': filelinks,
    })
    return entry
394 395
def symrevorshortnode(req, ctx):
    """return the symbolic revision from the request, escaped for use in
    templates, or the short node of *ctx* when none was given"""
    if 'node' in req.form:
        return templatefilters.revescape(req.form['node'][0])
    return short(ctx.node())
400 401
401 402 def changesetentry(web, req, tmpl, ctx):
402 403 '''Obtain a dictionary to be used to render the "changeset" template.'''
403 404
404 405 showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
405 406 showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
406 407 ctx.node())
407 408 showbranch = nodebranchnodefault(ctx)
408 409
409 410 files = []
410 411 parity = paritygen(web.stripecount)
411 412 for blockno, f in enumerate(ctx.files()):
412 413 template = f in ctx and 'filenodelink' or 'filenolink'
413 414 files.append(tmpl(template,
414 415 node=ctx.hex(), file=f, blockno=blockno + 1,
415 416 parity=next(parity)))
416 417
417 418 basectx = basechangectx(web.repo, req)
418 419 if basectx is None:
419 420 basectx = ctx.p1()
420 421
421 422 style = web.config('web', 'style')
422 423 if 'style' in req.form:
423 424 style = req.form['style'][0]
424 425
425 426 diff = diffs(web, tmpl, ctx, basectx, None, style)
426 427
427 428 parity = paritygen(web.stripecount)
428 429 diffstatsgen = diffstatgen(ctx, basectx)
429 430 diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)
430 431
431 432 return dict(
432 433 diff=diff,
433 434 symrev=symrevorshortnode(req, ctx),
434 435 basenode=basectx.hex(),
435 436 changesettag=showtags,
436 437 changesetbookmark=showbookmarks,
437 438 changesetbranch=showbranch,
438 439 files=files,
439 440 diffsummary=lambda **x: diffsummary(diffstatsgen),
440 441 diffstat=diffstats,
441 442 archives=web.archivelist(ctx.hex()),
442 443 **commonentry(web.repo, ctx))
443 444
def listfilediffs(tmpl, files, node, max):
    """expand the 'filedifflink' template for at most *max* files,
    followed by 'fileellipses' when the list was truncated"""
    for path in files[:max]:
        yield tmpl('filedifflink', node=hex(node), file=path)
    if len(files) > max:
        yield tmpl('fileellipses')
449 450
def diffs(web, tmpl, ctx, basectx, files, style, linerange=None,
          lineidprefix=''):
    """yield rendered diff blocks between *basectx* and *ctx*

    :files: optional list restricting the diff to those paths
    :style: 'raw' keeps the leading git-style header line
    :linerange: optional (fromline, toline) filter matched against each
        hunk's range in the new file
    :lineidprefix: prefix for per-line anchor ids
    """

    def prettyprintlines(lines, blockno):
        # classify each diff line so the template can style it
        for lineno, l in enumerate(lines, 1):
            difflineno = "%d.%d" % (blockno, lineno)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineno=lineno,
                       lineid=lineidprefix + "l%s" % difflineno,
                       linenumber="% 8s" % difflineno)

    repo = web.repo
    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    node1 = basectx.node()
    node2 = ctx.node()
    parity = paritygen(web.stripecount)

    diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts)
    for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
        if style != 'raw':
            # drop the "diff --git ..." line for rendered styles
            header = header[1:]
        lines = [h + '\n' for h in header]
        for hunkrange, hunklines in hunks:
            if linerange is not None and hunkrange is not None:
                # hunkrange is (s1, l1, s2, l2); filter on the new-file side
                s1, l1, s2, l2 = hunkrange
                if not mdiff.hunkinrange((s2, l2), linerange):
                    continue
            lines.extend(hunklines)
        if lines:
            yield tmpl('diffblock', parity=next(parity), blockno=blockno,
                       lines=prettyprintlines(lines, blockno))
495 496
def compare(tmpl, context, leftlines, rightlines):
    '''Generator function that provides side-by-side comparison data.

    :context: number of context lines to show; negative means "show
        everything"
    '''

    def compline(type, leftlineno, leftline, rightlineno, rightline):
        # render one comparison row; either side may be absent (None)
        lineid = leftlineno and ("l%s" % leftlineno) or ''
        lineid += rightlineno and ("r%s" % rightlineno) or ''
        return tmpl('comparisonline',
                    type=type,
                    lineid=lineid,
                    leftlineno=leftlineno,
                    leftlinenumber="% 6s" % (leftlineno or ''),
                    leftline=leftline or '',
                    rightlineno=rightlineno,
                    rightlinenumber="% 6s" % (rightlineno or ''),
                    rightline=rightline or '')

    def getblock(opcodes):
        # pair up lines from SequenceMatcher opcodes; when one side of an
        # opcode is longer, the leftover lines are emitted with the other
        # side left blank
        for type, llo, lhi, rlo, rhi in opcodes:
            len1 = lhi - llo
            len2 = rhi - rlo
            count = min(len1, len2)
            for i in xrange(count):
                yield compline(type=type,
                               leftlineno=llo + i + 1,
                               leftline=leftlines[llo + i],
                               rightlineno=rlo + i + 1,
                               rightline=rightlines[rlo + i])
            if len1 > len2:
                for i in xrange(llo + count, lhi):
                    yield compline(type=type,
                                   leftlineno=i + 1,
                                   leftline=leftlines[i],
                                   rightlineno=None,
                                   rightline=None)
            elif len2 > len1:
                for i in xrange(rlo + count, rhi):
                    yield compline(type=type,
                                   leftlineno=None,
                                   leftline=None,
                                   rightlineno=i + 1,
                                   rightline=rightlines[i])

    s = difflib.SequenceMatcher(None, leftlines, rightlines)
    if context < 0:
        # full comparison in one block
        yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
    else:
        for oc in s.get_grouped_opcodes(n=context):
            yield tmpl('comparisonblock', lines=getblock(oc))
544 545
def diffstatgen(ctx, basectx):
    '''Generator function that provides the diffstat data.'''
    # compute once; every next() call hands out the same tuple so several
    # consumers (summary line and per-file table) can share the work
    stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
    summary = tuple(patch.diffstatsum(stats))
    while True:
        yield (stats,) + summary
552 553
def diffsummary(statgen):
    '''Return a short summary of the diff.'''
    data = next(statgen)
    # data is (stats, maxname, maxtotal, addtotal, removetotal, binary)
    stats, addtotal, removetotal = data[0], data[3], data[4]
    return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
        len(stats), addtotal, removetotal)
559 560
def diffstat(tmpl, ctx, statgen, parity):
    '''Return a diffstat template for each file in the diff.'''
    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    changed = ctx.files()

    def pct(count):
        # percentage of the largest per-file total; guard empty diffs
        if maxtotal == 0:
            return 0
        return (float(count) / maxtotal) * 100

    for fileno, (filename, adds, removes, isbinary) in enumerate(stats, 1):
        # files absent from the changeset get the link-less template
        if filename in changed:
            template = 'diffstatlink'
        else:
            template = 'diffstatnolink'
        yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
                   total=adds + removes, addpct=pct(adds),
                   removepct=pct(removes), parity=next(parity))
579 580
class sessionvars(object):
    """mutable mapping of query parameters that renders itself, via
    template iteration, as a URL query-string fragment"""

    def __init__(self, vars, start='?'):
        # `start` is the separator emitted before the first variable
        self.start = start
        self.vars = vars

    def __getitem__(self, key):
        return self.vars[key]

    def __setitem__(self, key, value):
        self.vars[key] = value

    def __copy__(self):
        # shallow copy: the clone can diverge without touching the
        # original's mapping
        return sessionvars(copy.copy(self.vars), self.start)

    def __iter__(self):
        first = True
        for key, value in sorted(self.vars.iteritems()):
            yield {'name': key,
                   'value': pycompat.bytestr(value),
                   'separator': self.start if first else '&',
                   }
            first = False
598 599
class wsgiui(uimod.ui):
    # default termwidth breaks under mod_wsgi
    def termwidth(self):
        # there is no tty behind a WSGI request, so terminal-size
        # detection would be meaningless; use a fixed conventional width
        return 80
603 604
def getwebsubs(repo):
    """build the websub substitution table from the [websub] (and legacy
    [interhg]) config sections

    Each entry is a sed-style 's<delim>regexp<delim>format<delim>flags'
    pattern with an arbitrary delimiter.  Invalid entries are warned
    about and skipped.  Returns a list of (compiled_regexp, format)
    pairs.
    """
    websubtable = []
    websubdefs = repo.ui.configitems('websub')
    # we must maintain interhg backwards compatibility
    websubdefs += repo.ui.configitems('interhg')
    for key, pattern in websubdefs:
        # grab the delimiter from the character after the "s"
        unesc = pattern[1]
        delim = re.escape(unesc)

        # identify portions of the pattern, taking care to avoid escaped
        # delimiters. the replace format and flags are optional, but
        # delimiters are required.
        match = re.match(
            r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
            % (delim, delim, delim), pattern)
        if not match:
            repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
                         % (key, pattern))
            continue

        # we need to unescape the delimiter for regexp and format
        delim_re = re.compile(r'(?<!\\)\\%s' % delim)
        regexp = delim_re.sub(unesc, match.group(1))
        format = delim_re.sub(unesc, match.group(2))

        # the pattern allows for 6 regexp flags, so set them if necessary
        flagin = match.group(3)
        flags = 0
        if flagin:
            for flag in flagin.upper():
                flags |= re.__dict__[flag]

        try:
            regexp = re.compile(regexp, flags)
            websubtable.append((regexp, format))
        except re.error:
            repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
                         % (key, regexp))
    return websubtable
General Comments 0
You need to be logged in to leave comments. Login now