##// END OF EJS Templates
context: add instabilities() method to basefilectx...
av6 -
r35092:bd274393 default
parent child Browse files
Show More
@@ -1,2606 +1,2608
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 revlog,
45 45 scmutil,
46 46 sparse,
47 47 subrepo,
48 48 util,
49 49 )
50 50
# convenience alias: cache the result of a method as an instance attribute
propertycache = util.propertycache

# matcher for any byte outside the printable-ASCII range 0x21-0x7f; used by
# changectx lookup to detect binary (20-byte) nodeids so they can be shown
# as hex in error messages rather than raw bytes
nonascii = re.compile(r'[^\x21-\x7f]').search
54 54
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context straight through makes basectx(repo, ctx)
        # a cheap no-op; __init__ in subclasses checks for this case too.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts of different concrete types never compare equal, even for
        # the same revision
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "file tracked in this context's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo state (path -> (source, node, kind)) for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        # deprecated alias for orphan(); scheduled for removal after 4.4
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        # deprecated alias for phasedivergent(); scheduled for removal after 4.4
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        # deprecated alias for contentdivergent(); scheduled for removal
        # after 4.4
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        # deprecated alias for isunstable(); scheduled for removal after 4.4
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        # note: returns the legacy names ('bumped', 'divergent'), unlike
        # instabilities() below
        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null changectx when this is not a merge
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError if
        the file is not present in this context's manifest"""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # avoid reading the full manifest when a delta (or the changeset's
        # file list) can answer the question
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # missing files have no flags
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False    # note: intentionally shadows builtins.reversed here
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
429 429
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    if not filtername.startswith('visible'):
        # some other filter (e.g. 'served') hid the revision
        message = _("filtered revision '%s' (not in '%s' subset)")
        message %= (changeid, filtername)
        return error.FilteredRepoLookupError(message)
    # the default 'visible' filter: the revision exists but is hidden
    message = _("hidden revision '%s'") % changeid
    advice = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(message, hint=advice)
442 442
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # Lookup cascade: each strategy either assigns _node/_rev and
        # returns, or falls through to the next one. Filtered* exceptions
        # are re-raised so the outer handler can build a nicer message.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                # py2 only: a long revnum is handled via its string form
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # possibly a binary nodeid
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # a string of decimal digits: treat as a revision number,
                # allowing negative values to count from the end
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # possibly a full hex nodeid
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unambiguous hex prefix of a nodeid
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make binary nodeids printable in the error message below
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # hash by revision when bound; fall back to identity otherwise
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # a single-element list when p2 is null, two elements otherwise
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changeset fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
708 708
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # filelog for this file's path, opened via the repository
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # resolve the changeset revision this file revision belongs to,
        # preferring whatever identifying info the constructor stored
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
749 749
    def __nonzero__(self):
        # truthiness == "this file exists in its changeset"
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file revision
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
786 786
    # simple accessors; changeset-level queries are delegated to the
    # associated changectx
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw linkrev from the filelog; see introrev() for the adjusted value
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            # unreadable data is treated as non-binary
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()
847 849
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses set this to True when their cmp() must always run
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # only fall through to a real content comparison when the sizes
        # could plausibly match; otherwise the files must differ
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
873 875
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
919 921
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # with no associated changeset, the raw linkrev is the best we can do
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
935 937
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
951 953
952 954 def parents(self):
953 955 _path = self._path
954 956 fl = self._filelog
955 957 parents = self._filelog.parents(self._filenode)
956 958 pl = [(_path, node, fl) for node in parents if node != nullid]
957 959
958 960 r = fl.renamed(self._filenode)
959 961 if r:
960 962 # - In the simple rename case, both parent are nullid, pl is empty.
961 963 # - In case of merge, only one of the parent is null id and should
962 964 # be replaced with the rename information. This parent is -always-
963 965 # the first one.
964 966 #
965 967 # As null id have always been filtered out in the previous list
966 968 # comprehension, inserting to 0 will always result in "replacing
967 969 # first nullid parent with rename information.
968 970 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
969 971
970 972 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
971 973
972 974 def p1(self):
973 975 return self.parents()[0]
974 976
975 977 def p2(self):
976 978 p = self.parents()
977 979 if len(p) == 2:
978 980 return p[1]
979 981 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
980 982
981 983 def annotate(self, follow=False, linenumber=False, skiprevs=None,
982 984 diffopts=None):
983 985 '''returns a list of tuples of ((ctx, number), line) for each line
984 986 in the file, where ctx is the filectx of the node where
985 987 that line was last changed; if linenumber parameter is true, number is
986 988 the line number at the first appearance in the managed file, otherwise,
987 989 number has a fixed value of False.
988 990 '''
989 991
990 992 def lines(text):
991 993 if text.endswith("\n"):
992 994 return text.count("\n")
993 995 return text.count("\n") + int(bool(text))
994 996
995 997 if linenumber:
996 998 def decorate(text, rev):
997 999 return ([annotateline(fctx=rev, lineno=i)
998 1000 for i in xrange(1, lines(text) + 1)], text)
999 1001 else:
1000 1002 def decorate(text, rev):
1001 1003 return ([annotateline(fctx=rev)] * lines(text), text)
1002 1004
1003 1005 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1004 1006
1005 1007 def parents(f):
1006 1008 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1007 1009 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1008 1010 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1009 1011 # isn't an ancestor of the srcrev.
1010 1012 f._changeid
1011 1013 pl = f.parents()
1012 1014
1013 1015 # Don't return renamed parents if we aren't following.
1014 1016 if not follow:
1015 1017 pl = [p for p in pl if p.path() == f.path()]
1016 1018
1017 1019 # renamed filectx won't have a filelog yet, so set it
1018 1020 # from the cache to save time
1019 1021 for p in pl:
1020 1022 if not '_filelog' in p.__dict__:
1021 1023 p._filelog = getlog(p.path())
1022 1024
1023 1025 return pl
1024 1026
1025 1027 # use linkrev to find the first changeset where self appeared
1026 1028 base = self
1027 1029 introrev = self.introrev()
1028 1030 if self.rev() != introrev:
1029 1031 base = self.filectx(self.filenode(), changeid=introrev)
1030 1032 if getattr(base, '_ancestrycontext', None) is None:
1031 1033 cl = self._repo.changelog
1032 1034 if introrev is None:
1033 1035 # wctx is not inclusive, but works because _ancestrycontext
1034 1036 # is used to test filelog revisions
1035 1037 ac = cl.ancestors([p.rev() for p in base.parents()],
1036 1038 inclusive=True)
1037 1039 else:
1038 1040 ac = cl.ancestors([introrev], inclusive=True)
1039 1041 base._ancestrycontext = ac
1040 1042
1041 1043 # This algorithm would prefer to be recursive, but Python is a
1042 1044 # bit recursion-hostile. Instead we do an iterative
1043 1045 # depth-first search.
1044 1046
1045 1047 # 1st DFS pre-calculates pcache and needed
1046 1048 visit = [base]
1047 1049 pcache = {}
1048 1050 needed = {base: 1}
1049 1051 while visit:
1050 1052 f = visit.pop()
1051 1053 if f in pcache:
1052 1054 continue
1053 1055 pl = parents(f)
1054 1056 pcache[f] = pl
1055 1057 for p in pl:
1056 1058 needed[p] = needed.get(p, 0) + 1
1057 1059 if p not in pcache:
1058 1060 visit.append(p)
1059 1061
1060 1062 # 2nd DFS does the actual annotate
1061 1063 visit[:] = [base]
1062 1064 hist = {}
1063 1065 while visit:
1064 1066 f = visit[-1]
1065 1067 if f in hist:
1066 1068 visit.pop()
1067 1069 continue
1068 1070
1069 1071 ready = True
1070 1072 pl = pcache[f]
1071 1073 for p in pl:
1072 1074 if p not in hist:
1073 1075 ready = False
1074 1076 visit.append(p)
1075 1077 if ready:
1076 1078 visit.pop()
1077 1079 curr = decorate(f.data(), f)
1078 1080 skipchild = False
1079 1081 if skiprevs is not None:
1080 1082 skipchild = f._changeid in skiprevs
1081 1083 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1082 1084 diffopts)
1083 1085 for p in pl:
1084 1086 if needed[p] == 1:
1085 1087 del hist[p]
1086 1088 del needed[p]
1087 1089 else:
1088 1090 needed[p] -= 1
1089 1091
1090 1092 hist[f] = curr
1091 1093 del pcache[f]
1092 1094
1093 1095 return zip(hist[base][0], hist[base][1].splitlines(True))
1094 1096
1095 1097 def ancestors(self, followfirst=False):
1096 1098 visit = {}
1097 1099 c = self
1098 1100 if followfirst:
1099 1101 cut = 1
1100 1102 else:
1101 1103 cut = None
1102 1104
1103 1105 while True:
1104 1106 for parent in c.parents()[:cut]:
1105 1107 visit[(parent.linkrev(), parent.filenode())] = parent
1106 1108 if not visit:
1107 1109 break
1108 1110 c = visit.pop(max(visit))
1109 1111 yield c
1110 1112
1111 1113 def decodeddata(self):
1112 1114 """Returns `data()` after running repository decoding filters.
1113 1115
1114 1116 This is often equivalent to how the data would be expressed on disk.
1115 1117 """
1116 1118 return self._repo.wwritedata(self.path(), self.data())
1117 1119
@attr.s(slots=True, frozen=True)
class annotateline(object):
    # filectx of the revision that introduced this line
    fctx = attr.ib()
    # line number at first appearance in the file, or False if untracked
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1124 1126
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all
    lines in either parent that match the child, annotate the child with
    the parent's data.

    Additionally, if `skipchild` is True, replace all other lines with
    parent annotate data as well such that child is never blamed for any
    lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # The blocks must be walked twice below -- materialize them.
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines
            # ('~') belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try to match up anything that couldn't be matched.
        # Reversing pblocks maintains the bias towards p2, matching the
        # behavior above.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with
        #   -U0). This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines
        #   in diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last
        # line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # enough parent lines: map child lines 1:1 (clamped)
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak],
                                                       skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then handle what is left, which may involve repeating the
        # parent's last line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1185 1187
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # Only pin the attributes the caller actually supplied; the rest
        # are computed lazily via propertycaches on basefilectx.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When
            # the repository is filtered this may lead to `filectx`
            # trying to build `changectx` for a filtered revision. In
            # such a case we fall back to creating a `changectx` on the
            # unfiltered version of the repository. This fallback should
            # not be an issue because `changectx` from `filectx` are not
            # used in complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents
            # several crashes. It does not ensure the behavior is
            # correct. However the behavior was not correct before
            # filtering either and "incorrect behavior" is seen as
            # better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that
            # are complicated to solve. Proper handling of the issue here
            # should be considered when solving linkrev issues are on the
            # table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """raw revision data, without flag processing applied"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # A censored node is only readable when the policy says so.
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report a copy for
        the changeset only if the file revision's linkrev points back to
        the changeset in question or both changeset parents contain
        different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # same file revision in a parent: not a rename here
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1291 1293
class committablectx(basectx):
    """A committablectx object provides common functionality for a context
    that wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # only pin attributes that were supplied; the others fall back to
        # the propertycaches defined below
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Fallback for computing file flags when the filesystem itself
        # doesn't support them.
        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return ''  # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # honor the devel.default-date override before asking the clock
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    def branch(self):
        return encoding.tolocal(self._extra['branch'])

    def closesbranch(self):
        return 'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never commit in a lower phase than any parent
        phase = phases.draft  # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1492 1494
1493 1495 class workingctx(committablectx):
1494 1496 """A workingctx object makes access to data related to
1495 1497 the current working directory convenient.
1496 1498 date - any valid date string or (unixtime, offset), or None.
1497 1499 user - username string, or None.
1498 1500 extra - a dictionary of extra values, or None.
1499 1501 changes - a list of file lists as returned by localrepo.status()
1500 1502 or None to use the repository status.
1501 1503 """
1502 1504 def __init__(self, repo, text="", user=None, date=None, extra=None,
1503 1505 changes=None):
1504 1506 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1505 1507
1506 1508 def __iter__(self):
1507 1509 d = self._repo.dirstate
1508 1510 for f in d:
1509 1511 if d[f] != 'r':
1510 1512 yield f
1511 1513
1512 1514 def __contains__(self, key):
1513 1515 return self._repo.dirstate[key] not in "?r"
1514 1516
1515 1517 def hex(self):
1516 1518 return hex(wdirid)
1517 1519
1518 1520 @propertycache
1519 1521 def _parents(self):
1520 1522 p = self._repo.dirstate.parents()
1521 1523 if p[1] == nullid:
1522 1524 p = p[:-1]
1523 1525 return [changectx(self._repo, x) for x in p]
1524 1526
1525 1527 def filectx(self, path, filelog=None):
1526 1528 """get a file context from the working directory"""
1527 1529 return workingfilectx(self._repo, path, workingctx=self,
1528 1530 filelog=filelog)
1529 1531
1530 1532 def dirty(self, missing=False, merge=True, branch=True):
1531 1533 "check whether a working directory is modified"
1532 1534 # check subrepos first
1533 1535 for s in sorted(self.substate):
1534 1536 if self.sub(s).dirty(missing=missing):
1535 1537 return True
1536 1538 # check current working dir
1537 1539 return ((merge and self.p2()) or
1538 1540 (branch and self.branch() != self.p1().branch()) or
1539 1541 self.modified() or self.added() or self.removed() or
1540 1542 (missing and self.deleted()))
1541 1543
1542 1544 def add(self, list, prefix=""):
1543 1545 with self._repo.wlock():
1544 1546 ui, ds = self._repo.ui, self._repo.dirstate
1545 1547 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1546 1548 rejected = []
1547 1549 lstat = self._repo.wvfs.lstat
1548 1550 for f in list:
1549 1551 # ds.pathto() returns an absolute file when this is invoked from
1550 1552 # the keyword extension. That gets flagged as non-portable on
1551 1553 # Windows, since it contains the drive letter and colon.
1552 1554 scmutil.checkportable(ui, os.path.join(prefix, f))
1553 1555 try:
1554 1556 st = lstat(f)
1555 1557 except OSError:
1556 1558 ui.warn(_("%s does not exist!\n") % uipath(f))
1557 1559 rejected.append(f)
1558 1560 continue
1559 1561 if st.st_size > 10000000:
1560 1562 ui.warn(_("%s: up to %d MB of RAM may be required "
1561 1563 "to manage this file\n"
1562 1564 "(use 'hg revert %s' to cancel the "
1563 1565 "pending addition)\n")
1564 1566 % (f, 3 * st.st_size // 1000000, uipath(f)))
1565 1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1566 1568 ui.warn(_("%s not added: only files and symlinks "
1567 1569 "supported currently\n") % uipath(f))
1568 1570 rejected.append(f)
1569 1571 elif ds[f] in 'amn':
1570 1572 ui.warn(_("%s already tracked!\n") % uipath(f))
1571 1573 elif ds[f] == 'r':
1572 1574 ds.normallookup(f)
1573 1575 else:
1574 1576 ds.add(f)
1575 1577 return rejected
1576 1578
1577 1579 def forget(self, files, prefix=""):
1578 1580 with self._repo.wlock():
1579 1581 ds = self._repo.dirstate
1580 1582 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1581 1583 rejected = []
1582 1584 for f in files:
1583 1585 if f not in self._repo.dirstate:
1584 1586 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1585 1587 rejected.append(f)
1586 1588 elif self._repo.dirstate[f] != 'a':
1587 1589 self._repo.dirstate.remove(f)
1588 1590 else:
1589 1591 self._repo.dirstate.drop(f)
1590 1592 return rejected
1591 1593
1592 1594 def undelete(self, list):
1593 1595 pctxs = self.parents()
1594 1596 with self._repo.wlock():
1595 1597 ds = self._repo.dirstate
1596 1598 for f in list:
1597 1599 if self._repo.dirstate[f] != 'r':
1598 1600 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1599 1601 else:
1600 1602 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1601 1603 t = fctx.data()
1602 1604 self._repo.wwrite(f, t, fctx.flags())
1603 1605 self._repo.dirstate.normal(f)
1604 1606
1605 1607 def copy(self, source, dest):
1606 1608 try:
1607 1609 st = self._repo.wvfs.lstat(dest)
1608 1610 except OSError as err:
1609 1611 if err.errno != errno.ENOENT:
1610 1612 raise
1611 1613 self._repo.ui.warn(_("%s does not exist!\n")
1612 1614 % self._repo.dirstate.pathto(dest))
1613 1615 return
1614 1616 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1615 1617 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1616 1618 "symbolic link\n")
1617 1619 % self._repo.dirstate.pathto(dest))
1618 1620 else:
1619 1621 with self._repo.wlock():
1620 1622 if self._repo.dirstate[dest] in '?':
1621 1623 self._repo.dirstate.add(dest)
1622 1624 elif self._repo.dirstate[dest] in 'r':
1623 1625 self._repo.dirstate.normallookup(dest)
1624 1626 self._repo.dirstate.copy(source, dest)
1625 1627
1626 1628 def match(self, pats=None, include=None, exclude=None, default='glob',
1627 1629 listsubrepos=False, badfn=None):
1628 1630 r = self._repo
1629 1631
1630 1632 # Only a case insensitive filesystem needs magic to translate user input
1631 1633 # to actual case in the filesystem.
1632 1634 icasefs = not util.fscasesensitive(r.root)
1633 1635 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1634 1636 default, auditor=r.auditor, ctx=self,
1635 1637 listsubrepos=listsubrepos, badfn=badfn,
1636 1638 icasefs=icasefs)
1637 1639
1638 1640 def flushall(self):
1639 1641 pass # For overlayworkingfilectx compatibility.
1640 1642
1641 1643 def _filtersuspectsymlink(self, files):
1642 1644 if not files or self._repo.dirstate._checklink:
1643 1645 return files
1644 1646
1645 1647 # Symlink placeholders may get non-symlink-like contents
1646 1648 # via user error or dereferencing by NFS or Samba servers,
1647 1649 # so we filter out any placeholders that don't look like a
1648 1650 # symlink
1649 1651 sane = []
1650 1652 for f in files:
1651 1653 if self.flags(f) == 'l':
1652 1654 d = self[f].data()
1653 1655 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1654 1656 self._repo.ui.debug('ignoring suspect symlink placeholder'
1655 1657 ' "%s"\n' % f)
1656 1658 continue
1657 1659 sane.append(f)
1658 1660 return sane
1659 1661
1660 1662 def _checklookup(self, files):
1661 1663 # check for any possibly clean files
1662 1664 if not files:
1663 1665 return [], [], []
1664 1666
1665 1667 modified = []
1666 1668 deleted = []
1667 1669 fixup = []
1668 1670 pctx = self._parents[0]
1669 1671 # do a full compare of any files that might have changed
1670 1672 for f in sorted(files):
1671 1673 try:
1672 1674 # This will return True for a file that got replaced by a
1673 1675 # directory in the interim, but fixing that is pretty hard.
1674 1676 if (f not in pctx or self.flags(f) != pctx.flags(f)
1675 1677 or pctx[f].cmp(self[f])):
1676 1678 modified.append(f)
1677 1679 else:
1678 1680 fixup.append(f)
1679 1681 except (IOError, OSError):
1680 1682 # A file become inaccessible in between? Mark it as deleted,
1681 1683 # matching dirstate behavior (issue5584).
1682 1684 # The dirstate has more complex behavior around whether a
1683 1685 # missing file matches a directory, etc, but we don't need to
1684 1686 # bother with that: if f has made it to this point, we're sure
1685 1687 # it's in the dirstate.
1686 1688 deleted.append(f)
1687 1689
1688 1690 return modified, deleted, fixup
1689 1691
1690 1692 def _poststatusfixup(self, status, fixup):
1691 1693 """update dirstate for files that are actually clean"""
1692 1694 poststatus = self._repo.postdsstatus()
1693 1695 if fixup or poststatus:
1694 1696 try:
1695 1697 oldid = self._repo.dirstate.identity()
1696 1698
1697 1699 # updating the dirstate is optional
1698 1700 # so we don't wait on the lock
1699 1701 # wlock can invalidate the dirstate, so cache normal _after_
1700 1702 # taking the lock
1701 1703 with self._repo.wlock(False):
1702 1704 if self._repo.dirstate.identity() == oldid:
1703 1705 if fixup:
1704 1706 normal = self._repo.dirstate.normal
1705 1707 for f in fixup:
1706 1708 normal(f)
1707 1709 # write changes out explicitly, because nesting
1708 1710 # wlock at runtime may prevent 'wlock.release()'
1709 1711 # after this block from doing so for subsequent
1710 1712 # changing files
1711 1713 tr = self._repo.currenttransaction()
1712 1714 self._repo.dirstate.write(tr)
1713 1715
1714 1716 if poststatus:
1715 1717 for ps in poststatus:
1716 1718 ps(self, status)
1717 1719 else:
1718 1720 # in this case, writing changes out breaks
1719 1721 # consistency, because .hg/dirstate was
1720 1722 # already changed simultaneously after last
1721 1723 # caching (see also issue5584 for detail)
1722 1724 self._repo.ui.debug('skip updating dirstate: '
1723 1725 'identity mismatch\n')
1724 1726 except error.LockError:
1725 1727 pass
1726 1728 finally:
1727 1729 # Even if the wlock couldn't be grabbed, clear out the list.
1728 1730 self._repo.clearpostdsstatus()
1729 1731
1730 1732 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1731 1733 '''Gets the status from the dirstate -- internal use only.'''
1732 1734 subrepos = []
1733 1735 if '.hgsub' in self:
1734 1736 subrepos = sorted(self.substate)
1735 1737 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1736 1738 clean=clean, unknown=unknown)
1737 1739
1738 1740 # check for any possibly clean files
1739 1741 fixup = []
1740 1742 if cmp:
1741 1743 modified2, deleted2, fixup = self._checklookup(cmp)
1742 1744 s.modified.extend(modified2)
1743 1745 s.deleted.extend(deleted2)
1744 1746
1745 1747 if fixup and clean:
1746 1748 s.clean.extend(fixup)
1747 1749
1748 1750 self._poststatusfixup(s, fixup)
1749 1751
1750 1752 if match.always():
1751 1753 # cache for performance
1752 1754 if s.unknown or s.ignored or s.clean:
1753 1755 # "_status" is cached with list*=False in the normal route
1754 1756 self._status = scmutil.status(s.modified, s.added, s.removed,
1755 1757 s.deleted, [], [], [])
1756 1758 else:
1757 1759 self._status = s
1758 1760
1759 1761 return s
1760 1762
1761 1763 @propertycache
1762 1764 def _manifest(self):
1763 1765 """generate a manifest corresponding to the values in self._status
1764 1766
1765 1767 This reuse the file nodeid from parent, but we use special node
1766 1768 identifiers for added and modified files. This is used by manifests
1767 1769 merge to see that files are different and by update logic to avoid
1768 1770 deleting newly added files.
1769 1771 """
1770 1772 return self._buildstatusmanifest(self._status)
1771 1773
1772 1774 def _buildstatusmanifest(self, status):
1773 1775 """Builds a manifest that includes the given status results."""
1774 1776 parents = self.parents()
1775 1777
1776 1778 man = parents[0].manifest().copy()
1777 1779
1778 1780 ff = self._flagfunc
1779 1781 for i, l in ((addednodeid, status.added),
1780 1782 (modifiednodeid, status.modified)):
1781 1783 for f in l:
1782 1784 man[f] = i
1783 1785 try:
1784 1786 man.setflag(f, ff(f))
1785 1787 except OSError:
1786 1788 pass
1787 1789
1788 1790 for f in status.deleted + status.removed:
1789 1791 if f in man:
1790 1792 del man[f]
1791 1793
1792 1794 return man
1793 1795
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the incoming 's' is intentionally discarded: status is always
        # recomputed from the dirstate so the result reflects disk state
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: comparing against something other than the parent
            # requires the generic manifest-based comparison
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1813 1815
1814 1816 def _matchstatus(self, other, match):
1815 1817 """override the match method with a filter for directory patterns
1816 1818
1817 1819 We use inheritance to customize the match.bad method only in cases of
1818 1820 workingctx since it belongs only to the working directory when
1819 1821 comparing against the parent changeset.
1820 1822
1821 1823 If we aren't comparing against the working directory's parent, then we
1822 1824 just use the default match object sent to us.
1823 1825 """
1824 1826 if other != self._repo['.']:
1825 1827 def bad(f, msg):
1826 1828 # 'f' may be a directory pattern from 'match.files()',
1827 1829 # so 'f not in ctx1' is not enough
1828 1830 if f not in other and not other.hasdir(f):
1829 1831 self._repo.ui.warn('%s: %s\n' %
1830 1832 (self._repo.dirstate.pathto(f), msg))
1831 1833 match.bad = bad
1832 1834 return match
1833 1835
    def markcommitted(self, node):
        # let the base class do its bookkeeping first, then give the sparse
        # machinery a chance to react to the newly created commit
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1838 1840
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog/ctx are optional; subclasses or propertycaches may
        # provide them lazily instead
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, name):
            return ctx._manifest.get(name, nullid)

        name = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        copysource = self.renamed()

        if copysource:
            # renamed() yields (path, filenode); filelog is resolved later
            candidates = [copysource + (None,)]
        else:
            candidates = [(name, nodefor(parentctxs[0], name), flog)]
        candidates.extend((name, nodefor(pctx, name), flog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        return []
1885 1887
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        copysource = self._repo.dirstate.copied(self._path)
        if copysource:
            p1manifest = self._changectx._parents[0]._manifest
            return copysource, p1manifest.get(copysource, nullid)
        return None

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        committime, tz = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path).st_mtime
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # the file is gone; fall back to the changeset's own timestamp
            return (committime, tz)
        return (mtime, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        target = self._path
        wvfs.audit(target)
        if wvfs.isdir(target) and not wvfs.islink(target):
            # a directory is squatting on our file name; remove it entirely
            wvfs.rmtree(target, forcibly=True)
        for prefix in reversed(list(util.finddirs(target))):
            if wvfs.isfileorlink(prefix):
                # a file blocks one of our ancestor directories; remove it
                wvfs.unlink(prefix)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1963 1965
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be flushed
    at a later time.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        # the mutable context whose writes we buffer until flushall()
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """Return the content of ``path``, consulting the cache first."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # bugfix: this previously interpolated self._path, which is
                # not an attribute of a context object — raising would have
                # died with AttributeError instead of the intended error
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def isinmemory(self):
        return True

    def filedate(self, path):
        """Return the cached mtime for dirty paths, else the wrapped one."""
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        """Return flags ('l'/'x') for ``path``, consulting the cache first."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # bugfix: report the requested path (self._path does not
                # exist on a context object)
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        """Buffer a write of ``data`` with ``flags`` for ``path``."""
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        # data stays None so content reads fall through to the wrapped ctx
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        """Return the size of ``path``, consulting the cache first."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # bugfix: report the requested path (self._path does not
                # exist on a context object)
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """Replay every buffered change into the wrapped context, in the
        order the writes originally happened, then reset the cache."""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    # flags-only change recorded by setflags()
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): the positional ``path`` binds to the
                # ``ignoremissing`` parameter of filectx.remove(); a truthy
                # path string silently enables ignoremissing — confirm
                # whether remove() without arguments was intended
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        return path in self._cache

    def _clean(self):
        # reset both the write-back cache and the flush ordering
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        if path not in self._cache:
            # remember first-write order so flushall() replays sequentially
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2099 2101
class overlayworkingfilectx(workingfilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # 'parent' is the overlayworkingctx acting as the write-back cache
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # compares by content only; flags are not considered here
        return self.data() != fctx.data()

    def ctx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # no-op: nothing touches the filesystem until the cache is flushed
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        # backgroundclose is accepted for interface parity but unused; the
        # data only goes into the in-memory cache
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        # ignoremissing is accepted for interface parity but unused
        return self._parent.remove(self._path)
2154 2156
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        cleanfiles = []
        if clean:
            changed = self._changedset
            cleanfiles = [f for f in self._manifest if f not in changed]
        matched = lambda files: [f for f in files if match(f)]
        return scmutil.status(matched(self._status.modified),
                              matched(self._status.added),
                              matched(self._status.removed),
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
2190 2192
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2206 2208
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() gives a tuple (of which only the source path is kept)
        # or a falsy value; the memfilectx API only wants the copy source
        renamed = fctx.renamed()
        copysource = renamed[0] if renamed else None
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2225 2227
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        fileinfo = patchstore.getfile(path)
        data = fileinfo[0]
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = fileinfo[1]
        return memfilectx(repo, path, data,
                          islink=islink, isexec=isexec,
                          copied=fileinfo[2], memctx=memctx)

    return getfilectx
2241 2243
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents (None) are normalized to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # hash in-memory content with the filelog parents so the
            # manifest node reflects any content change
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a truthy filectx: file present/changed
                modified.append(f)
            else:
                # filectxfn returned None: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2364 2366
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2397 2399
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            # remember whether the override equals the original; used by the
            # "reusable" test below
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # lazy: either the override or originalfctx.data, called on demand
        return self._datafunc()
2468 2470
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # copy before padding so the caller's list is not mutated
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are context objects compared against nullid
        # (a bytes value); this relies on the contexts' equality semantics
        # against nodes — confirm against basectx.__eq__
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2566 2568
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flag information
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        assert not flags
        # bugfix: open in binary mode — data()/decodeddata() read bytes
        # ("rb"), so text mode here would reject bytes on Python 3 and
        # mangle line endings on Windows
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,644 +1,645
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import difflib
13 13 import os
14 14 import re
15 15
16 16 from ..i18n import _
17 17 from ..node import hex, nullid, short
18 18
19 19 from .common import (
20 20 ErrorResponse,
21 21 HTTP_BAD_REQUEST,
22 22 HTTP_NOT_FOUND,
23 23 paritygen,
24 24 )
25 25
26 26 from .. import (
27 27 context,
28 28 error,
29 29 match,
30 30 mdiff,
31 31 patch,
32 32 pathutil,
33 33 pycompat,
34 34 templatefilters,
35 35 ui as uimod,
36 36 util,
37 37 )
38 38
def up(p):
    """Return the parent directory of path *p* as a "/"-rooted,
    "/"-terminated string.

    The parent of a top-level entry (and of "/" itself) is "/".
    An empty path is treated as the root instead of raising IndexError.
    """
    # normalize: root the path, then drop a single trailing slash
    if not p.startswith("/"):
        p = "/" + p
    if p.endswith("/"):
        p = p[:-1]
    parent = os.path.dirname(p)
    if parent == "/":
        return "/"
    return parent + "/"
48 48
def _navseq(step, firststep=None):
    # Infinite generator of widening revision offsets used to build
    # navigation links: optional firststep, then 1*step, 3*step,
    # 10*step, 30*step, ... (a 1-3-10 pattern per decade).
    if firststep:
        yield firststep
        # a first step in 20..40 would land too close to the following
        # decade values; bump it to 50 and emit that as well
        if firststep >= 20 and firststep <= 40:
            firststep = 50
            yield firststep
    # NOTE(review): callers appear to always pass firststep (revnav.gen
    # passes pagelen); with the default None these asserts/comparison
    # rely on py2 None-ordering — confirm before reuse.
    assert step > 0
    assert firststep > 0
    # start the 1-3-10 sequence above firststep so values only grow
    while step <= firststep:
        step *= 10
    while True:
        yield 1 * step
        yield 3 * step
        step *= 10
63 63
class revnav(object):
    """Build "before"/"after" navigation links over a changelog."""

    def __init__(self, repo):
        """Navigation generation object

        :repo: repo object we generate nav for
        """
        # used for hex generation
        self._revlog = repo.changelog

    def __nonzero__(self):
        """return True if any revision to navigate over"""
        return self._first() is not None

    __bool__ = __nonzero__

    def _first(self):
        """return the minimum non-filtered changeset or None"""
        try:
            return next(iter(self._revlog))
        except StopIteration:
            # completely empty (or fully filtered) repository
            return None

    def hex(self, rev):
        return hex(self._revlog.node(rev))

    def gen(self, pos, pagelen, limit):
        """computes label and revision id for navigation link

        :pos: is the revision relative to which we generate navigation.
        :pagelen: the size of each navigation page
        :limit: how far shall we link

        The return is:
            - a single element tuple
            - containing a dictionary with a `before` and `after` key
            - values are generator functions taking arbitrary number of kwargs
            - yield items are dictionaries with `label` and `node` keys
        """
        if not self:
            # empty repo
            return ({'before': (), 'after': ()},)

        # candidate revisions at widening distances on both sides of pos
        targets = []
        for f in _navseq(1, pagelen):
            if f > limit:
                break
            targets.append(pos + f)
            targets.append(pos - f)
        targets.sort()

        # always link back to the first changeset
        first = self._first()
        navbefore = [("(%i)" % first, self.hex(first))]
        navafter = []
        for rev in targets:
            if rev not in self._revlog:
                # skip filtered/nonexistent revisions
                continue
            if pos < rev < limit:
                navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
            if 0 < rev < pos:
                navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))


        navafter.append(("tip", "tip"))

        # lazily adapt (label, node) pairs to template dicts
        data = lambda i: {"label": i[0], "node": i[1]}
        return ({'before': lambda **map: (data(i) for i in navbefore),
                 'after': lambda **map: (data(i) for i in navafter)},)
132 132
class filerevnav(revnav):
    """revnav variant that navigates over a single file's filelog."""

    def __init__(self, repo, path):
        """Navigation generation object

        :repo: repo object we generate nav for
        :path: path of the file we generate nav for
        """
        # used for iteration
        self._changelog = repo.unfiltered().changelog
        # used for hex generation
        self._revlog = repo.file(path)

    def hex(self, rev):
        # map the filelog revision to its introducing changeset's hash
        return hex(self._changelog.node(self._revlog.linkrev(rev)))
148 148
149 149 class _siblings(object):
150 150 def __init__(self, siblings=None, hiderev=None):
151 151 if siblings is None:
152 152 siblings = []
153 153 self.siblings = [s for s in siblings if s.node() != nullid]
154 154 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
155 155 self.siblings = []
156 156
157 157 def __iter__(self):
158 158 for s in self.siblings:
159 159 d = {
160 160 'node': s.hex(),
161 161 'rev': s.rev(),
162 162 'user': s.user(),
163 163 'date': s.date(),
164 164 'description': s.description(),
165 165 'branch': s.branch(),
166 166 }
167 167 if util.safehasattr(s, 'path'):
168 168 d['file'] = s.path()
169 169 yield d
170 170
171 171 def __len__(self):
172 172 return len(self.siblings)
173 173
def difffeatureopts(req, ui, section):
    """Build diff feature options for *section*, overriding whitespace
    toggles from request form parameters when present."""
    opts = patch.difffeatureopts(ui, untrusted=True,
                                 section=section, whitespace=True)

    whitespaceflags = ('ignorews', 'ignorewsamount', 'ignorewseol',
                       'ignoreblanklines')
    for name in whitespaceflags:
        raw = req.form.get(name, [None])[0]
        if raw is None:
            continue
        parsed = util.parsebool(raw)
        if parsed is None:
            # unparseable values count as "enabled"
            parsed = True
        setattr(opts, name, parsed)

    return opts
185 185
def annotate(req, fctx, ui):
    # Annotate fctx following renames and with line numbers, honoring
    # the 'annotate' section diff options carried by the request.
    diffopts = difffeatureopts(req, ui, 'annotate')
    return fctx.annotate(follow=True, linenumber=True, diffopts=diffopts)
189 189
def parents(ctx, hide=None):
    """Return a _siblings wrapper over *ctx*'s parents.

    For a file context shown at a changeset that did not introduce that
    file revision, link to the introducing changeset instead of the
    regular parents.
    """
    if isinstance(ctx, context.basefilectx):
        introrev = ctx.introrev()
        if ctx.changectx().rev() != introrev:
            return _siblings([ctx.repo()[introrev]], hide)
    return _siblings(ctx.parents(), hide)
196 196
def children(ctx, hide=None):
    """Return a _siblings wrapper over *ctx*'s children, optionally
    hiding revision *hide*."""
    kids = ctx.children()
    return _siblings(kids, hide)
199 199
def renamelink(fctx):
    """Return template data describing *fctx*'s rename source, or an
    empty list when the file was not renamed."""
    copied = fctx.renamed()
    if not copied:
        return []
    return [{'file': copied[0], 'node': hex(copied[1])}]
205 205
def nodetagsdict(repo, node):
    """Return the tags on *node* as a list of {"name": tag} dicts."""
    return [{"name": tag} for tag in repo.nodetags(node)]
208 208
def nodebookmarksdict(repo, node):
    """Return the bookmarks on *node* as a list of {"name": mark} dicts."""
    return [{"name": mark} for mark in repo.nodebookmarks(node)]
211 211
def nodebranchdict(repo, ctx):
    """Return [{"name": branch}] when *ctx* is its branch's tip,
    otherwise an empty list."""
    branch = ctx.branch()
    # In an empty repo ctx.node() == nullid and ctx.branch() is
    # 'default'; branchtip then raises and tipnode stays None.
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    if tipnode == ctx.node():
        return [{"name": branch}]
    return []
224 224
def nodeinbranch(repo, ctx):
    """Return [{"name": branch}] for a non-default branch when *ctx* is
    not that branch's tip, otherwise an empty list."""
    branch = ctx.branch()
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    if branch != 'default' and tipnode != ctx.node():
        return [{"name": branch}]
    return []
235 235
def nodebranchnodefault(ctx):
    """Return [{"name": branch}] unless *ctx* is on the default branch."""
    branch = ctx.branch()
    if branch == 'default':
        return []
    return [{"name": branch}]
242 242
def showtag(repo, tmpl, t1, node=nullid, **args):
    """Render template *t1* once for each tag on *node*."""
    for tag in repo.nodetags(node):
        yield tmpl(t1, tag=tag, **args)
246 246
def showbookmark(repo, tmpl, t1, node=nullid, **args):
    """Render template *t1* once for each bookmark on *node*."""
    for mark in repo.nodebookmarks(node):
        yield tmpl(t1, bookmark=mark, **args)
250 250
def branchentries(repo, stripecount, limit=0):
    """Return a template generator over branch heads.

    Branches are sorted open-before-closed, then by descending tip
    revision; at most *limit* entries are yielded when limit > 0.
    """
    tips = []
    heads = repo.heads()
    parity = paritygen(stripecount)
    # open branches (item[1] False) first, newest tip first
    sortkey = lambda item: (not item[1], item[0].rev())

    def entries(**map):
        count = 0
        if not tips:
            # lazily populate on first expansion; `tips` is shared so a
            # second expansion reuses the computed list
            for tag, hs, tip, closed in repo.branchmap().iterbranches():
                tips.append((repo[tip], closed))
        for ctx, closed in sorted(tips, key=sortkey, reverse=True):
            if limit > 0 and count >= limit:
                return
            count += 1
            if closed:
                status = 'closed'
            elif ctx.node() not in heads:
                status = 'inactive'
            else:
                status = 'open'
            yield {
                'parity': next(parity),
                'branch': ctx.branch(),
                'status': status,
                'node': ctx.hex(),
                'date': ctx.date()
            }

    return entries
281 281
def cleanpath(repo, path):
    """Strip leading slashes from *path* and canonicalize it relative
    to the repository root (rejecting escapes like "..")."""
    relpath = path.lstrip('/')
    return pathutil.canonpath(repo.root, '', relpath)
285 285
def changeidctx(repo, changeid):
    """Resolve *changeid* to a changectx.

    Falls back to interpreting *changeid* as a manifest node and
    resolving the changeset that introduced it.
    """
    try:
        ctx = repo[changeid]
    except error.RepoError:
        man = repo.manifestlog._revlog
        ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]

    return ctx
294 294
def changectx(repo, req):
    """Resolve the changeset a web request refers to (default "tip").

    For a ``node=A:B`` range the right-hand endpoint is used; a
    ``manifest`` form value is accepted as an alternative identifier.
    """
    changeid = "tip"
    if 'node' in req.form:
        changeid = req.form['node'][0]
        if ':' in changeid:
            # revision range: keep everything after the first colon
            changeid = changeid.split(':', 1)[1]
    elif 'manifest' in req.form:
        changeid = req.form['manifest'][0]

    return changeidctx(repo, changeid)
306 306
def basechangectx(repo, req):
    """Return the left-hand changeset of a ``node=A:B`` range request,
    or None when no range was given."""
    if 'node' in req.form:
        changeid = req.form['node'][0]
        if ':' in changeid:
            baseid = changeid.split(':', 1)[0]
            return changeidctx(repo, baseid)

    return None
316 316
def filectx(repo, req):
    """Resolve the file context a web request refers to.

    Requires a 'file' form value plus either 'node' (a changeset id) or
    'filenode' (a filelog node); raises ErrorResponse (404) otherwise.
    """
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = cleanpath(repo, req.form['file'][0])
    if 'node' in req.form:
        changeid = req.form['node'][0]
    elif 'filenode' in req.form:
        changeid = req.form['filenode'][0]
    else:
        raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
    try:
        # first try the id as a changeset identifier
        fctx = repo[changeid][path]
    except error.RepoError:
        # fall back to treating it as a filelog node id
        fctx = repo.filectx(path, fileid=changeid)

    return fctx
333 333
def linerange(req):
    """Parse the 'linerange' form parameter ("from:to", 1-based) into a
    0-based, half-open range tuple, or None when absent.

    Raises ErrorResponse (400) for duplicated or malformed values.
    """
    linerange = req.form.get('linerange')
    if linerange is None:
        return None
    if len(linerange) > 1:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'redundant linerange parameter')
    try:
        fromline, toline = map(int, linerange[0].split(':', 1))
    except ValueError:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'invalid linerange parameter')
    try:
        # validates ordering/bounds and converts to 0-based half-open
        return util.processlinerange(fromline, toline)
    except error.ParseError as exc:
        raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
350 350
def formatlinerange(fromline, toline):
    """Format a 0-based, half-open line range back to the 1-based
    "from:to" form used in URLs."""
    start = fromline + 1
    return '%d:%d' % (start, toline)
353 353
def commonentry(repo, ctx):
    """Return the template keywords shared by all changeset-like views
    (shortlog, changelog, changeset, ...) for changeset *ctx*."""
    node = ctx.node()
    return {
        'rev': ctx.rev(),
        'node': hex(node),
        'author': ctx.user(),
        'desc': ctx.description(),
        'date': ctx.date(),
        'extra': ctx.extra(),
        'phase': ctx.phasestr(),
        'obsolete': ctx.obsolete(),
        # evolution instabilities affecting this changeset, as
        # template-friendly {"name": ...} dicts
        'instabilities': [{"name": i} for i in ctx.instabilities()],
        'branch': nodebranchnodefault(ctx),
        'inbranch': nodeinbranch(repo, ctx),
        'branches': nodebranchdict(repo, ctx),
        'tags': nodetagsdict(repo, node),
        'bookmarks': nodebookmarksdict(repo, node),
        # lazy so templates that don't expand them skip the work
        'parent': lambda **x: parents(ctx),
        'child': lambda **x: children(ctx),
    }
373 374
def changelistentry(web, ctx, tmpl):
    '''Obtain a dictionary to be used for entries in a changelist.

    This function is called when producing items for the "entries" list passed
    to the "shortlog" and "changelog" templates.
    '''
    repo = web.repo
    rev = ctx.rev()
    n = ctx.node()
    showtags = showtag(repo, tmpl, 'changelogtag', n)
    files = listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

    entry = commonentry(repo, ctx)
    # override parent/child with list-context variants that hide the
    # adjacent revision (rev - 1 / rev + 1) already shown in the list
    entry.update(
        allparents=lambda **x: parents(ctx),
        parent=lambda **x: parents(ctx, rev - 1),
        child=lambda **x: children(ctx, rev + 1),
        changelogtag=showtags,
        files=files,
    )
    return entry
395 396
def symrevorshortnode(req, ctx):
    # prefer the symbolic revision the user asked for (escaped for
    # safe template output); otherwise show the resolved short hash
    if 'node' in req.form:
        return templatefilters.revescape(req.form['node'][0])
    else:
        return short(ctx.node())
401 402
def changesetentry(web, req, tmpl, ctx):
    '''Obtain a dictionary to be used to render the "changeset" template.'''

    showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
    showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
                                 ctx.node())
    showbranch = nodebranchnodefault(ctx)

    # one rendered row per touched file; files no longer present get
    # the link-less template
    files = []
    parity = paritygen(web.stripecount)
    for blockno, f in enumerate(ctx.files()):
        template = f in ctx and 'filenodelink' or 'filenolink'
        files.append(tmpl(template,
                          node=ctx.hex(), file=f, blockno=blockno + 1,
                          parity=next(parity)))

    # diff base: explicit left side of a node range, else first parent
    basectx = basechangectx(web.repo, req)
    if basectx is None:
        basectx = ctx.p1()

    style = web.config('web', 'style')
    if 'style' in req.form:
        style = req.form['style'][0]

    diff = diffs(web, tmpl, ctx, basectx, None, style)

    parity = paritygen(web.stripecount)
    diffstatsgen = diffstatgen(ctx, basectx)
    diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)

    return dict(
        diff=diff,
        symrev=symrevorshortnode(req, ctx),
        basenode=basectx.hex(),
        changesettag=showtags,
        changesetbookmark=showbookmarks,
        changesetbranch=showbranch,
        files=files,
        diffsummary=lambda **x: diffsummary(diffstatsgen),
        diffstat=diffstats,
        archives=web.archivelist(ctx.hex()),
        **commonentry(web.repo, ctx))
444 445
def listfilediffs(tmpl, files, node, max):
    """Yield "filedifflink" entries for at most *max* files, followed
    by a "fileellipses" marker when the list was truncated."""
    for filename in files[:max]:
        yield tmpl('filedifflink', node=hex(node), file=filename)
    if len(files) > max:
        yield tmpl('fileellipses')
450 451
def diffs(web, tmpl, ctx, basectx, files, style, linerange=None,
          lineidprefix=''):
    """Yield rendered "diffblock" templates for ctx vs basectx.

    :files: restrict the diff to these paths (None for all)
    :linerange: if given, only hunks overlapping this range are kept
    :lineidprefix: prepended to per-line anchor ids
    """

    def prettyprintlines(lines, blockno):
        # classify each diff line (+/-/@/context) for styling
        for lineno, l in enumerate(lines, 1):
            difflineno = "%d.%d" % (blockno, lineno)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineno=lineno,
                       lineid=lineidprefix + "l%s" % difflineno,
                       linenumber="% 8s" % difflineno)

    repo = web.repo
    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    node1 = basectx.node()
    node2 = ctx.node()
    parity = paritygen(web.stripecount)

    diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts)
    for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
        if style != 'raw':
            # drop the "diff --git ..." line for styled output
            header = header[1:]
        lines = [h + '\n' for h in header]
        for hunkrange, hunklines in hunks:
            if linerange is not None and hunkrange is not None:
                # keep only hunks intersecting the requested line range
                s1, l1, s2, l2 = hunkrange
                if not mdiff.hunkinrange((s2, l2), linerange):
                    continue
            lines.extend(hunklines)
        if lines:
            yield tmpl('diffblock', parity=next(parity), blockno=blockno,
                       lines=prettyprintlines(lines, blockno))
496 497
def compare(tmpl, context, leftlines, rightlines):
    '''Generator function that provides side-by-side comparison data.'''
    # context < 0 means "show everything"; otherwise only show
    # `context` lines around each change.

    def compline(type, leftlineno, leftline, rightlineno, rightline):
        # anchor id combines left/right line numbers when present
        lineid = leftlineno and ("l%s" % leftlineno) or ''
        lineid += rightlineno and ("r%s" % rightlineno) or ''
        return tmpl('comparisonline',
                    type=type,
                    lineid=lineid,
                    leftlineno=leftlineno,
                    leftlinenumber="% 6s" % (leftlineno or ''),
                    leftline=leftline or '',
                    rightlineno=rightlineno,
                    rightlinenumber="% 6s" % (rightlineno or ''),
                    rightline=rightline or '')

    def getblock(opcodes):
        # expand SequenceMatcher opcodes into paired rows; when one
        # side is longer, emit rows with the other side empty
        for type, llo, lhi, rlo, rhi in opcodes:
            len1 = lhi - llo
            len2 = rhi - rlo
            count = min(len1, len2)
            for i in xrange(count):
                yield compline(type=type,
                               leftlineno=llo + i + 1,
                               leftline=leftlines[llo + i],
                               rightlineno=rlo + i + 1,
                               rightline=rightlines[rlo + i])
            if len1 > len2:
                for i in xrange(llo + count, lhi):
                    yield compline(type=type,
                                   leftlineno=i + 1,
                                   leftline=leftlines[i],
                                   rightlineno=None,
                                   rightline=None)
            elif len2 > len1:
                for i in xrange(rlo + count, rhi):
                    yield compline(type=type,
                                   leftlineno=None,
                                   leftline=None,
                                   rightlineno=i + 1,
                                   rightline=rightlines[i])

    s = difflib.SequenceMatcher(None, leftlines, rightlines)
    if context < 0:
        yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
    else:
        for oc in s.get_grouped_opcodes(n=context):
            yield tmpl('comparisonblock', lines=getblock(oc))
545 546
def diffstatgen(ctx, basectx):
    '''Generator function that provides the diffstat data.'''
    # Compute once, then hand the same tuple to every consumer
    # (diffsummary and diffstat both draw from this generator).
    stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
    maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
    while True:
        yield stats, maxname, maxtotal, addtotal, removetotal, binary
553 554
def diffsummary(statgen):
    '''Return a short summary of the diff.'''

    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
             len(stats), addtotal, removetotal)
560 561
def diffstat(tmpl, ctx, statgen, parity):
    '''Return a diffstat template for each file in the diff.'''

    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    files = ctx.files()

    def pct(i):
        # percentage of the largest per-file change, for bar widths
        if maxtotal == 0:
            return 0
        return (float(i) / maxtotal) * 100

    fileno = 0
    for filename, adds, removes, isbinary in stats:
        # files not touched by this changeset get a link-less row
        template = filename in files and 'diffstatlink' or 'diffstatnolink'
        total = adds + removes
        fileno += 1
        yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
                   total=total, addpct=pct(adds), removepct=pct(removes),
                   parity=next(parity))
580 581
class sessionvars(object):
    """Mapping of URL query variables that iterates as template dicts
    forming a query string ('?' before the first item, '&' after)."""
    def __init__(self, vars, start='?'):
        self.start = start
        self.vars = vars
    def __getitem__(self, key):
        return self.vars[key]
    def __setitem__(self, key, value):
        self.vars[key] = value
    def __copy__(self):
        # shallow-copy the mapping so edits to the copy stay local
        return sessionvars(copy.copy(self.vars), self.start)
    def __iter__(self):
        sep = self.start
        for key, value in sorted(self.vars.iteritems()):
            yield {'name': key,
                   'value': pycompat.bytestr(value),
                   'separator': sep,
                   }
            sep = '&'
599 600
class wsgiui(uimod.ui):
    """ui subclass for WSGI hosting."""
    # default termwidth breaks under mod_wsgi
    def termwidth(self):
        return 80
604 605
def getwebsubs(repo):
    """Parse [websub] (and legacy [interhg]) config entries.

    Each entry is a sed-style substitution 's<delim>regexp<delim>format
    <delim>[flags]'; returns a list of (compiled_regexp, format) pairs,
    warning about and skipping invalid entries.
    """
    websubtable = []
    websubdefs = repo.ui.configitems('websub')
    # we must maintain interhg backwards compatibility
    websubdefs += repo.ui.configitems('interhg')
    for key, pattern in websubdefs:
        # grab the delimiter from the character after the "s"
        unesc = pattern[1]
        delim = re.escape(unesc)

        # identify portions of the pattern, taking care to avoid escaped
        # delimiters. the replace format and flags are optional, but
        # delimiters are required.
        match = re.match(
            r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
            % (delim, delim, delim), pattern)
        if not match:
            repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
                         % (key, pattern))
            continue

        # we need to unescape the delimiter for regexp and format
        delim_re = re.compile(r'(?<!\\)\\%s' % delim)
        regexp = delim_re.sub(unesc, match.group(1))
        format = delim_re.sub(unesc, match.group(2))

        # the pattern allows for 6 regexp flags, so set them if necessary
        flagin = match.group(3)
        flags = 0
        if flagin:
            for flag in flagin.upper():
                flags |= re.__dict__[flag]

        try:
            regexp = re.compile(regexp, flags)
            websubtable.append((regexp, format))
        except re.error:
            repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
                         % (key, regexp))
    return websubtable
General Comments 0
You need to be logged in to leave comments. Login now