##// END OF EJS Templates
context: remove unwanted assignments in basectx.__new__() (API)...
Martin von Zweigbergk -
r37188:d7f3fdab default
parent child Browse files
Show More
@@ -1,2617 +1,2611 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from . import (
30 30 dagop,
31 31 encoding,
32 32 error,
33 33 fileset,
34 34 match as matchmod,
35 35 obsolete as obsmod,
36 36 obsutil,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 pycompat,
41 41 repoview,
42 42 revlog,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
54 54 propertycache = util.propertycache
55 55
56 56 nonascii = re.compile(br'[^\x21-\x7f]').search
57 57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through a context constructor is a
        # no-op: return it unchanged instead of building a new object.
        if isinstance(changeid, basectx):
            return changeid

        return super(basectx, cls).__new__(cls)

    def __bytes__(self):
        # short hex of the changeset node, e.g. b'd7f3fdab1234'
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare by exact type and revision number; subclasses
        # without a _rev (or non-context objects) compare unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership is file-path membership in this context's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> filectx for that path in this context
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # manifest diff reported the file unchanged
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # mapping of subrepo path -> subrepo state for this context
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    # Thin accessors over cached attributes set up by subclasses.
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything above "public" phase (draft, secret, ...) is mutable
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null changectx when there is only one parent
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever manifest
        # data is already cached to avoid a full manifest read.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have no flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # build a matcher scoped to this repo/context
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
388 382
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # Note: this *returns* the exception object; callers raise it themselves.
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = unfilteredrepo[changeid]

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # Non-"visible" filters (e.g. 'served') get a generic message.
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
413 407
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # Resolution order below: int rev, 'null', 'tip', '.', binary node,
        # decimal rev string, hex node, names (tags/bookmarks/branches),
        # then unambiguous hex prefix. Each successful branch sets
        # self._node/self._rev and returns.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = "%d" % changeid
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a raw binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    # reject strings like '042' or '1.0' that int() accepts
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    # negative revs count back from tip
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unambiguous hex node prefix
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message printable for binary node ids
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # hash by revision when resolved; fall back to identity otherwise
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # list of one or two parent changectxs; nullrev p2 is omitted
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        # tuple view of the changelog entry
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    # Accessors delegating to the parsed changelog entry.
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # lazily yield every ancestor changectx
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
690 684
691 685 class basefilectx(object):
692 686 """A filecontext object represents the common logic for its children:
693 687 filectx: read-only access to a filerevision that is already present
694 688 in the repo,
695 689 workingfilectx: a filecontext that represents files from the working
696 690 directory,
697 691 memfilectx: a filecontext that represents files in-memory,
698 692 overlayfilectx: duplicate another filecontext with some fields overridden.
699 693 """
    @propertycache
    def _filelog(self):
        """The filelog (per-file revlog) holding this file's history."""
        return self._repo.file(self._path)
703 697
    @propertycache
    def _changeid(self):
        """Changelog revision this file context is associated with.

        Prefers an explicitly supplied changeid or changectx; otherwise
        corrects the filelog linkrev via a known descendant, or falls back
        to the raw linkrev (which may be shadowed by other changesets).
        """
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
716 710
    @propertycache
    def _filenode(self):
        """Binary node id of this file revision in the filelog."""
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
723 717
    @propertycache
    def _filerev(self):
        """Filelog revision number for this file revision."""
        return self._filelog.rev(self._filenode)
727 721
    @propertycache
    def _repopath(self):
        """Repo-relative path of the file (same as _path here)."""
        return self._path
731 725
    def __nonzero__(self):
        # A filectx is truthy iff the file revision actually exists;
        # resolving _filenode raises LookupError for missing files.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__
741 735
    def __bytes__(self):
        # "path@shortnode", or "path@???" when the changeset can't be resolved
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
749 743
    def __repr__(self):
        # e.g. <filectx foo.py@d7f3fdab>
        return r"<%s %s>" % (type(self).__name__, str(self))
752 746
    def __hash__(self):
        # hash by (path, filenode) when resolvable, identity otherwise —
        # mirrors __eq__ below so equal objects hash equally
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)
758 752
    def __eq__(self, other):
        # same concrete type, same path, same file revision
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False
765 759
766 760 def __ne__(self, other):
767 761 return not (self == other)
768 762
    # Thin delegating accessors: per-file data lives on the filelog
    # (_filerev/_filenode), everything changeset-scoped is forwarded to the
    # owning changectx.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            # unreadable data is treated as non-binary
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()
831 825
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        # NOTE(review): a subclass representing a missing file presumably
        # overrides this to return True — confirm against the rest of the file.
        return False
838 832
    # Subclasses with their own comparison semantics set this to True so
    # that cmp() defers to them (see the fctx._customcmp check below).
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only pay for a content comparison when the sizes could match.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so the contents must differ
        return True
857 851
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
903 897
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # without an associated changeset the raw linkrev is all we have
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
919 913
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already pointing at the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)
927 921
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
943 937
944 938 def parents(self):
945 939 _path = self._path
946 940 fl = self._filelog
947 941 parents = self._filelog.parents(self._filenode)
948 942 pl = [(_path, node, fl) for node in parents if node != nullid]
949 943
950 944 r = fl.renamed(self._filenode)
951 945 if r:
952 946 # - In the simple rename case, both parent are nullid, pl is empty.
953 947 # - In case of merge, only one of the parent is null id and should
954 948 # be replaced with the rename information. This parent is -always-
955 949 # the first one.
956 950 #
957 951 # As null id have always been filtered out in the previous list
958 952 # comprehension, inserting to 0 will always result in "replacing
959 953 # first nullid parent with rename information.
960 954 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
961 955
962 956 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
963 957
964 958 def p1(self):
965 959 return self.parents()[0]
966 960
967 961 def p2(self):
968 962 p = self.parents()
969 963 if len(p) == 2:
970 964 return p[1]
971 965 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
972 966
973 967 def annotate(self, follow=False, skiprevs=None, diffopts=None):
974 968 """Returns a list of annotateline objects for each line in the file
975 969
976 970 - line.fctx is the filectx of the node where that line was last changed
977 971 - line.lineno is the line number at the first appearance in the managed
978 972 file
979 973 - line.text is the data on that line (including newline character)
980 974 """
981 975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
982 976
983 977 def parents(f):
984 978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
985 979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
986 980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
987 981 # isn't an ancestor of the srcrev.
988 982 f._changeid
989 983 pl = f.parents()
990 984
991 985 # Don't return renamed parents if we aren't following.
992 986 if not follow:
993 987 pl = [p for p in pl if p.path() == f.path()]
994 988
995 989 # renamed filectx won't have a filelog yet, so set it
996 990 # from the cache to save time
997 991 for p in pl:
998 992 if not r'_filelog' in p.__dict__:
999 993 p._filelog = getlog(p.path())
1000 994
1001 995 return pl
1002 996
1003 997 # use linkrev to find the first changeset where self appeared
1004 998 base = self.introfilectx()
1005 999 if getattr(base, '_ancestrycontext', None) is None:
1006 1000 cl = self._repo.changelog
1007 1001 if base.rev() is None:
1008 1002 # wctx is not inclusive, but works because _ancestrycontext
1009 1003 # is used to test filelog revisions
1010 1004 ac = cl.ancestors([p.rev() for p in base.parents()],
1011 1005 inclusive=True)
1012 1006 else:
1013 1007 ac = cl.ancestors([base.rev()], inclusive=True)
1014 1008 base._ancestrycontext = ac
1015 1009
1016 1010 return dagop.annotate(base, parents, skiprevs=skiprevs,
1017 1011 diffopts=diffopts)
1018 1012
1019 1013 def ancestors(self, followfirst=False):
1020 1014 visit = {}
1021 1015 c = self
1022 1016 if followfirst:
1023 1017 cut = 1
1024 1018 else:
1025 1019 cut = None
1026 1020
1027 1021 while True:
1028 1022 for parent in c.parents()[:cut]:
1029 1023 visit[(parent.linkrev(), parent.filenode())] = parent
1030 1024 if not visit:
1031 1025 break
1032 1026 c = visit.pop(max(visit))
1033 1027 yield c
1034 1028
1035 1029 def decodeddata(self):
1036 1030 """Returns `data()` after running repository decoding filters.
1037 1031
1038 1032 This is often equivalent to how the data would be expressed on disk.
1039 1033 """
1040 1034 return self._repo.wwritedata(self.path(), self.data())
1041 1035
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be supplied; the
        remaining identifiers are derived lazily via propertycaches.
        """
        self._repo = repo
        self._path = path

        # refuse to build a filectx with no way to resolve a revision
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-seed the propertycaches for values we were actually given;
        # anything missing is computed on demand
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """return the raw (undecoded) revision data from the filelog"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file content, honoring the censor policy

        Raises Abort for censored nodes unless censor.policy is 'ignore',
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as reported by the filelog for this file revision
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either changeset parent already contains this exact file
        # revision, it was not introduced here, so report no copy
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """return filectxs for child file revisions (renames not followed)"""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1147 1141
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # a committable context has no revision/node yet
        self._rev = None
        self._node = None
        self._text = text
        # only seed the propertycaches for values explicitly provided;
        # missing ones fall back to the cached defaults below
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        # first parent's identifier followed by "+" marks uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default status when none was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        # default user when none was passed to __init__
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # default date when none was passed to __init__; devel.default-date
        # allows tests to pin a deterministic value
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    # simple accessors over the cached commit metadata
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    # accessors over the cached status object
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # an uncommitted context carries no tags
        return []

    def bookmarks(self):
        # inherit the bookmarks of all parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """return the flags ('l', 'x' or '') for path, '' when unknown"""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield the parents themselves first, then all their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1348 1342
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate over tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # a file belongs to the working context unless it is untracked ('?')
        # or marked removed ('r') in the dirstate
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop the second parent when it is null (non-merge working dir)
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """schedule the given files for tracking; return rejected files"""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # was scheduled for removal; re-mark for status lookup
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """stop tracking the given files; return files that were not tracked"""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # freshly added files are simply dropped from the dirstate
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """restore removed files from a parent revision into the working dir"""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # take the file from whichever parent has it
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """record in the dirstate that dest is a copy of source"""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """build a matcher for the given patterns against the working dir"""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        """drop files that are symlink placeholders with non-link contents"""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """compare possibly-clean files against the first parent

        Returns a (modified, deleted, fixup) triple, where 'fixup' lists
        files that turned out to be clean and can be marked normal.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # added/modified files get sentinel node ids so they compare unequal
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        # let the base class update the dirstate, then refresh sparse state
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1692 1686
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # an uncommitted file has no changeset/file revision identifiers yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid when the file is absent from the manifest
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (srcpath, srcnode, None): no filelog known for the rename source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1739 1733
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read (and decode) the file from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        """return (source path, source filenode) if copied, else None"""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """return (mtime, tz) of the on-disk file, falling back to the
        working context's date when the file is missing"""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory is in the way: remove it entirely
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove the first ancestor path component that is a file/symlink
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        # set the symlink (l) and executable (x) flags on the on-disk file
        self._repo.wvfs.setflags(self._path, l, x)
1819 1813
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        # start with an empty write-back cache
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or replace) the underlying context this overlay wraps."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return file content for ``path``, preferring cached data."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """Build a manifest reflecting the cached adds/modifies/removes."""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty, exists here, and exists in the wrapped context
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # dirty, exists here, but absent from the wrapped context
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # dirty, deleted here, but present in the wrapped context
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` is a copy of ``origin``; path must be dirty."""
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Use the ``path`` argument in the message: this change
                # context has no ``_path`` attribute (only file contexts do),
                # so referencing self._path here raised AttributeError.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # As in flags(): report the ``path`` argument; self._path
                # does not exist on this class.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2107 2101
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # the owning overlayworkingctx; all reads/writes are delegated to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if contents differ
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # no-op: nothing exists on disk to audit
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but not
        # forwarded: there is no file handle to close for in-memory writes.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no-op: nothing on disk can conflict with an in-memory write
        pass
2166 2160
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: ``super(workingctx, self)`` is intentional (not a typo for
        # workingcommitctx): it skips workingctx.__init__ and calls
        # committablectx.__init__ directly, passing the precomputed
        # ``changes`` status through.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything in the manifest that is not being committed is "clean"
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2202 2196
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: the first miss computes and stores; later calls hit the dict.
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2218 2212
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        srcfctx = ctx[path]
        # renamed() yields a (source, node) tuple but only the source path is
        # kept here -- apparently we only track one parent (why not store just
        # that instead of a tuple?)
        rename = srcfctx.renamed()
        copysource = rename[0] if rename else rename
        return memfilectx(repo, memctx, path, srcfctx.data(),
                          islink=srcfctx.islink(), isexec=srcfctx.isexec(),
                          copied=copysource)

    return getfilectx
2237 2231
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        filedata, filemode, copysource = patchstore.getfile(path)
        # a None payload signals a deletion; memctx expects None back
        if filedata is None:
            return None
        linkflag, execflag = filemode
        return memfilectx(repo, memctx, path, filedata, islink=linkflag,
                          isexec=execflag, copied=copysource)

    return getfilectx
2252 2246
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patch store or a context-like mapping in place of a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context -> the file is present
                modified.append(f)
            else:
                # filectxfn returned None -> the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2375 2369
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode flags the same way manifests do: 'l' for link, 'x' for exec
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # copyrev is recomputed at commit time, so nullid is fine here
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # in-memory write: just replace the cached data
        self._data = data
2408 2402
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctx not overridden -> trivially "matches" the original
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            # keep data lazy: store the bound method, not its result
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """Return the (possibly overridden, lazily computed) file content."""
        return self._datafunc()
2479 2473
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # forward only ``repo`` to basectx.__new__; the remaining arguments
        # are consumed by __init__
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null contexts so _parents always has exactly two entries
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are change contexts while nullid is a node;
        # ``p1 != nullid`` looks like it was meant to be
        # ``p1.node() != nullid`` -- TODO confirm intended semantics.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2577 2571
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so the
        # stat-based fast path is skipped when either side is a symlink.
        eitherlink = 'l' in self.flags() or 'l' in fctx.flags()
        diskbacked = isinstance(fctx, workingfilectx) and self._repo
        if diskbacked and not eitherlink:
            # Fast-path for merge when both sides are disk-backed. Note that
            # filecmp uses the opposite return convention (True if same) from
            # our cmp functions (True if different).
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        # arbitrary files carry no link/exec flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # flags are not supported for arbitrary on-disk files
        assert not flags
        with open(self._path, "w") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now