context: fix typo in workingcommitctx...
Sean Farley
r39747:6c8ceebc default
@@ -1,2578 +1,2578 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirfilenodeids,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 sparse,
42 42 subrepo,
43 43 subrepoutil,
44 44 util,
45 45 )
46 46 from .utils import (
47 47 dateutil,
48 48 stringutil,
49 49 )
50 50
51 51 propertycache = util.propertycache
52 52
53 53 class basectx(object):
54 54 """A basectx object represents the common logic for its children:
55 55 changectx: read-only context that is already present in the repo,
56 56 workingctx: a context that represents the working directory and can
57 57 be committed,
58 58 memctx: a context that represents changes in-memory and can also
59 59 be committed."""
60 60
61 61 def __init__(self, repo):
62 62 self._repo = repo
63 63
64 64 def __bytes__(self):
65 65 return short(self.node())
66 66
67 67 __str__ = encoding.strmethod(__bytes__)
68 68
69 69 def __repr__(self):
70 70 return r"<%s %s>" % (type(self).__name__, str(self))
71 71
72 72 def __eq__(self, other):
73 73 try:
74 74 return type(self) == type(other) and self._rev == other._rev
75 75 except AttributeError:
76 76 return False
77 77
78 78 def __ne__(self, other):
79 79 return not (self == other)
80 80
81 81 def __contains__(self, key):
82 82 return key in self._manifest
83 83
84 84 def __getitem__(self, key):
85 85 return self.filectx(key)
86 86
87 87 def __iter__(self):
88 88 return iter(self._manifest)
89 89
90 90 def _buildstatusmanifest(self, status):
91 91 """Builds a manifest that includes the given status results, if this is
92 92 a working copy context. For non-working copy contexts, it just returns
93 93 the normal manifest."""
94 94 return self.manifest()
95 95
96 96 def _matchstatus(self, other, match):
97 97 """This internal method provides a way for child objects to override the
98 98 match operator.
99 99 """
100 100 return match
101 101
102 102 def _buildstatus(self, other, s, match, listignored, listclean,
103 103 listunknown):
104 104 """build a status with respect to another context"""
105 105 # Load earliest manifest first for caching reasons. More specifically,
106 106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 108 # 1000 and cache it so that when you read 1001, we just need to apply a
109 109 # delta to what's in the cache. So that's one full reconstruction + one
110 110 # delta application.
111 111 mf2 = None
112 112 if self.rev() is not None and self.rev() < other.rev():
113 113 mf2 = self._buildstatusmanifest(s)
114 114 mf1 = other._buildstatusmanifest(s)
115 115 if mf2 is None:
116 116 mf2 = self._buildstatusmanifest(s)
117 117
118 118 modified, added = [], []
119 119 removed = []
120 120 clean = []
121 121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 122 deletedset = set(deleted)
123 123 d = mf1.diff(mf2, match=match, clean=listclean)
124 124 for fn, value in d.iteritems():
125 125 if fn in deletedset:
126 126 continue
127 127 if value is None:
128 128 clean.append(fn)
129 129 continue
130 130 (node1, flag1), (node2, flag2) = value
131 131 if node1 is None:
132 132 added.append(fn)
133 133 elif node2 is None:
134 134 removed.append(fn)
135 135 elif flag1 != flag2:
136 136 modified.append(fn)
137 137 elif node2 not in wdirfilenodeids:
138 138 # When comparing files between two commits, we save time by
139 139 # not comparing the file contents when the nodeids differ.
140 140 # Note that this means we incorrectly report a reverted change
141 141 # to a file as a modification.
142 142 modified.append(fn)
143 143 elif self[fn].cmp(other[fn]):
144 144 modified.append(fn)
145 145 else:
146 146 clean.append(fn)
147 147
148 148 if removed:
149 149 # need to filter files if they are already reported as removed
150 150 unknown = [fn for fn in unknown if fn not in mf1 and
151 151 (not match or match(fn))]
152 152 ignored = [fn for fn in ignored if fn not in mf1 and
153 153 (not match or match(fn))]
154 154 # if they're deleted, don't report them as removed
155 155 removed = [fn for fn in removed if fn not in deletedset]
156 156
157 157 return scmutil.status(modified, added, removed, deleted, unknown,
158 158 ignored, clean)
159 159
160 160 @propertycache
161 161 def substate(self):
162 162 return subrepoutil.state(self, self._repo.ui)
163 163
164 164 def subrev(self, subpath):
165 165 return self.substate[subpath][1]
166 166
167 167 def rev(self):
168 168 return self._rev
169 169 def node(self):
170 170 return self._node
171 171 def hex(self):
172 172 return hex(self.node())
173 173 def manifest(self):
174 174 return self._manifest
175 175 def manifestctx(self):
176 176 return self._manifestctx
177 177 def repo(self):
178 178 return self._repo
179 179 def phasestr(self):
180 180 return phases.phasenames[self.phase()]
181 181 def mutable(self):
182 182 return self.phase() > phases.public
183 183
184 184 def matchfileset(self, expr, badfn=None):
185 185 return fileset.match(self, expr, badfn=badfn)
186 186
187 187 def obsolete(self):
188 188 """True if the changeset is obsolete"""
189 189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
190 190
191 191 def extinct(self):
192 192 """True if the changeset is extinct"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
194 194
195 195 def orphan(self):
196 196 """True if the changeset is not obsolete, but its ancestor is"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
198 198
199 199 def phasedivergent(self):
200 200 """True if the changeset tries to be a successor of a public changeset
201 201
202 202 Only non-public and non-obsolete changesets may be phase-divergent.
203 203 """
204 204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
205 205
206 206 def contentdivergent(self):
207 207 """Is a successor of a changeset with multiple possible successor sets
208 208
209 209 Only non-public and non-obsolete changesets may be content-divergent.
210 210 """
211 211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
212 212
213 213 def isunstable(self):
214 214 """True if the changeset is either orphan, phase-divergent or
215 215 content-divergent"""
216 216 return self.orphan() or self.phasedivergent() or self.contentdivergent()
217 217
218 218 def instabilities(self):
219 219 """return the list of instabilities affecting this changeset.
220 220
221 221 Instabilities are returned as strings. possible values are:
222 222 - orphan,
223 223 - phase-divergent,
224 224 - content-divergent.
225 225 """
226 226 instabilities = []
227 227 if self.orphan():
228 228 instabilities.append('orphan')
229 229 if self.phasedivergent():
230 230 instabilities.append('phase-divergent')
231 231 if self.contentdivergent():
232 232 instabilities.append('content-divergent')
233 233 return instabilities
234 234
235 235 def parents(self):
236 236 """return contexts for each parent changeset"""
237 237 return self._parents
238 238
239 239 def p1(self):
240 240 return self._parents[0]
241 241
242 242 def p2(self):
243 243 parents = self._parents
244 244 if len(parents) == 2:
245 245 return parents[1]
246 246 return changectx(self._repo, nullrev)
247 247
248 248 def _fileinfo(self, path):
249 249 if r'_manifest' in self.__dict__:
250 250 try:
251 251 return self._manifest[path], self._manifest.flags(path)
252 252 except KeyError:
253 253 raise error.ManifestLookupError(self._node, path,
254 254 _('not found in manifest'))
255 255 if r'_manifestdelta' in self.__dict__ or path in self.files():
256 256 if path in self._manifestdelta:
257 257 return (self._manifestdelta[path],
258 258 self._manifestdelta.flags(path))
259 259 mfl = self._repo.manifestlog
260 260 try:
261 261 node, flag = mfl[self._changeset.manifest].find(path)
262 262 except KeyError:
263 263 raise error.ManifestLookupError(self._node, path,
264 264 _('not found in manifest'))
265 265
266 266 return node, flag
267 267
268 268 def filenode(self, path):
269 269 return self._fileinfo(path)[0]
270 270
271 271 def flags(self, path):
272 272 try:
273 273 return self._fileinfo(path)[1]
274 274 except error.LookupError:
275 275 return ''
276 276
277 277 def sub(self, path, allowcreate=True):
278 278 '''return a subrepo for the stored revision of path, never wdir()'''
279 279 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 280
281 281 def nullsub(self, path, pctx):
282 282 return subrepo.nullsubrepo(self, path, pctx)
283 283
284 284 def workingsub(self, path):
285 285 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 286 context.
287 287 '''
288 288 return subrepo.subrepo(self, path, allowwdir=True)
289 289
290 290 def match(self, pats=None, include=None, exclude=None, default='glob',
291 291 listsubrepos=False, badfn=None):
292 292 r = self._repo
293 293 return matchmod.match(r.root, r.getcwd(), pats,
294 294 include, exclude, default,
295 295 auditor=r.nofsauditor, ctx=self,
296 296 listsubrepos=listsubrepos, badfn=badfn)
297 297
298 298 def diff(self, ctx2=None, match=None, changes=None, opts=None,
299 299 losedatafn=None, prefix='', relroot='', copy=None,
300 300 hunksfilterfn=None):
301 301 """Returns a diff generator for the given contexts and matcher"""
302 302 if ctx2 is None:
303 303 ctx2 = self.p1()
304 304 if ctx2 is not None:
305 305 ctx2 = self._repo[ctx2]
306 306 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
307 307 opts=opts, losedatafn=losedatafn, prefix=prefix,
308 308 relroot=relroot, copy=copy,
309 309 hunksfilterfn=hunksfilterfn)
310 310
311 311 def dirs(self):
312 312 return self._manifest.dirs()
313 313
314 314 def hasdir(self, dir):
315 315 return self._manifest.hasdir(dir)
316 316
317 317 def status(self, other=None, match=None, listignored=False,
318 318 listclean=False, listunknown=False, listsubrepos=False):
319 319 """return status of files between two nodes or node and working
320 320 directory.
321 321
322 322 If other is None, compare this node with working directory.
323 323
324 324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 325 """
326 326
327 327 ctx1 = self
328 328 ctx2 = self._repo[other]
329 329
330 330 # This next code block is, admittedly, fragile logic that tests for
331 331 # reversing the contexts and wouldn't need to exist if it weren't for
332 332 # the fast (and common) code path of comparing the working directory
333 333 # with its first parent.
334 334 #
335 335 # What we're aiming for here is the ability to call:
336 336 #
337 337 # workingctx.status(parentctx)
338 338 #
339 339 # If we always built the manifest for each context and compared those,
340 340 # then we'd be done. But the special case of the above call means we
341 341 # just copy the manifest of the parent.
342 342 reversed = False
343 343 if (not isinstance(ctx1, changectx)
344 344 and isinstance(ctx2, changectx)):
345 345 reversed = True
346 346 ctx1, ctx2 = ctx2, ctx1
347 347
348 348 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
349 349 match = ctx2._matchstatus(ctx1, match)
350 350 r = scmutil.status([], [], [], [], [], [], [])
351 351 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
352 352 listunknown)
353 353
354 354 if reversed:
355 355 # Reverse added and removed. Clear deleted, unknown and ignored as
356 356 # these make no sense to reverse.
357 357 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
358 358 r.clean)
359 359
360 360 if listsubrepos:
361 361 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
362 362 try:
363 363 rev2 = ctx2.subrev(subpath)
364 364 except KeyError:
365 365 # A subrepo that existed in node1 was deleted between
366 366 # node1 and node2 (inclusive). Thus, ctx2's substate
367 367 # won't contain that subpath. The best we can do is ignore it.
368 368 rev2 = None
369 369 submatch = matchmod.subdirmatcher(subpath, match)
370 370 s = sub.status(rev2, match=submatch, ignored=listignored,
371 371 clean=listclean, unknown=listunknown,
372 372 listsubrepos=True)
373 373 for rfiles, sfiles in zip(r, s):
374 374 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
375 375
376 376 narrowmatch = self._repo.narrowmatch()
377 377 if not narrowmatch.always():
378 378 for l in r:
379 379 l[:] = list(filter(narrowmatch, l))
380 380 for l in r:
381 381 l.sort()
382 382
383 383 return r
384 384
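For orientation, a minimal sketch of how the basectx API above is typically driven (hedged: the repository path is an assumption, error handling is omitted, and only the entry points hg.repository() and ui.load() are used besides the methods defined in this file):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')  # assumed repository location
    ctx = repo[b'tip']                          # read-only changectx
    st = ctx.p1().status(ctx)                   # scmutil.status: changes brought by ctx
    for f in st.modified:
        ui.write(b'M %s\n' % f)
    for f in st.added:
        ui.write(b'A %s\n' % f)

The repo and ui objects from this sketch are reused by the later sketches below.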
385 385 class changectx(basectx):
386 386 """A changecontext object makes access to data related to a particular
387 387 changeset convenient. It represents a read-only context already present in
388 388 the repo."""
389 389 def __init__(self, repo, changeid='.'):
390 390 """changeid is a revision number, a 20-byte binary node, a 40-char hex node, or one of 'null', 'tip' or '.'"""
391 391 super(changectx, self).__init__(repo)
392 392
393 393 try:
394 394 if isinstance(changeid, int):
395 395 self._node = repo.changelog.node(changeid)
396 396 self._rev = changeid
397 397 return
398 398 elif changeid == 'null':
399 399 self._node = nullid
400 400 self._rev = nullrev
401 401 return
402 402 elif changeid == 'tip':
403 403 self._node = repo.changelog.tip()
404 404 self._rev = repo.changelog.rev(self._node)
405 405 return
406 406 elif (changeid == '.'
407 407 or repo.local() and changeid == repo.dirstate.p1()):
408 408 # this is a hack to delay/avoid loading obsmarkers
409 409 # when we know that '.' won't be hidden
410 410 self._node = repo.dirstate.p1()
411 411 self._rev = repo.unfiltered().changelog.rev(self._node)
412 412 return
413 413 elif len(changeid) == 20:
414 414 try:
415 415 self._node = changeid
416 416 self._rev = repo.changelog.rev(changeid)
417 417 return
418 418 except error.FilteredLookupError:
419 419 changeid = hex(changeid) # for the error message
420 420 raise
421 421 except LookupError:
422 422 # check if it might have come from damaged dirstate
423 423 #
424 424 # XXX we could avoid the unfiltered if we had a recognizable
425 425 # exception for filtered changeset access
426 426 if (repo.local()
427 427 and changeid in repo.unfiltered().dirstate.parents()):
428 428 msg = _("working directory has unknown parent '%s'!")
429 429 raise error.Abort(msg % short(changeid))
430 430 changeid = hex(changeid) # for the error message
431 431
432 432 elif len(changeid) == 40:
433 433 try:
434 434 self._node = bin(changeid)
435 435 self._rev = repo.changelog.rev(self._node)
436 436 return
437 437 except error.FilteredLookupError:
438 438 raise
439 439 except (TypeError, LookupError):
440 440 pass
441 441 else:
442 442 raise error.ProgrammingError(
443 443 "unsupported changeid '%s' of type %s" %
444 444 (changeid, type(changeid)))
445 445
446 446 except (error.FilteredIndexError, error.FilteredLookupError):
447 447 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
448 448 % pycompat.bytestr(changeid))
449 449 except error.FilteredRepoLookupError:
450 450 raise
451 451 except IndexError:
452 452 pass
453 453 raise error.RepoLookupError(
454 454 _("unknown revision '%s'") % changeid)
455 455
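As a quick illustration of the changeid forms the constructor above accepts (normally reached through repo[...]; reusing repo from the basectx sketch), note that anything else ends up in the ProgrammingError branch:

    tip = repo[b'tip']       # the 'tip' keyword
    repo[tip.rev()]          # integer revision number
    repo[tip.node()]         # 20-byte binary node
    repo[tip.hex()]          # 40-character hex node
    repo[b'.']               # working directory parent
    repo[b'null']            # the null revision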
456 456 def __hash__(self):
457 457 try:
458 458 return hash(self._rev)
459 459 except AttributeError:
460 460 return id(self)
461 461
462 462 def __nonzero__(self):
463 463 return self._rev != nullrev
464 464
465 465 __bool__ = __nonzero__
466 466
467 467 @propertycache
468 468 def _changeset(self):
469 469 return self._repo.changelog.changelogrevision(self.rev())
470 470
471 471 @propertycache
472 472 def _manifest(self):
473 473 return self._manifestctx.read()
474 474
475 475 @property
476 476 def _manifestctx(self):
477 477 return self._repo.manifestlog[self._changeset.manifest]
478 478
479 479 @propertycache
480 480 def _manifestdelta(self):
481 481 return self._manifestctx.readdelta()
482 482
483 483 @propertycache
484 484 def _parents(self):
485 485 repo = self._repo
486 486 p1, p2 = repo.changelog.parentrevs(self._rev)
487 487 if p2 == nullrev:
488 488 return [changectx(repo, p1)]
489 489 return [changectx(repo, p1), changectx(repo, p2)]
490 490
491 491 def changeset(self):
492 492 c = self._changeset
493 493 return (
494 494 c.manifest,
495 495 c.user,
496 496 c.date,
497 497 c.files,
498 498 c.description,
499 499 c.extra,
500 500 )
501 501 def manifestnode(self):
502 502 return self._changeset.manifest
503 503
504 504 def user(self):
505 505 return self._changeset.user
506 506 def date(self):
507 507 return self._changeset.date
508 508 def files(self):
509 509 return self._changeset.files
510 510 def description(self):
511 511 return self._changeset.description
512 512 def branch(self):
513 513 return encoding.tolocal(self._changeset.extra.get("branch"))
514 514 def closesbranch(self):
515 515 return 'close' in self._changeset.extra
516 516 def extra(self):
517 517 """Return a dict of extra information."""
518 518 return self._changeset.extra
519 519 def tags(self):
520 520 """Return a list of byte tag names"""
521 521 return self._repo.nodetags(self._node)
522 522 def bookmarks(self):
523 523 """Return a list of byte bookmark names."""
524 524 return self._repo.nodebookmarks(self._node)
525 525 def phase(self):
526 526 return self._repo._phasecache.phase(self._repo, self._rev)
527 527 def hidden(self):
528 528 return self._rev in repoview.filterrevs(self._repo, 'visible')
529 529
530 530 def isinmemory(self):
531 531 return False
532 532
533 533 def children(self):
534 534 """return list of changectx contexts for each child changeset.
535 535
536 536 This returns only the immediate child changesets. Use descendants() to
537 537 recursively walk children.
538 538 """
539 539 c = self._repo.changelog.children(self._node)
540 540 return [changectx(self._repo, x) for x in c]
541 541
542 542 def ancestors(self):
543 543 for a in self._repo.changelog.ancestors([self._rev]):
544 544 yield changectx(self._repo, a)
545 545
546 546 def descendants(self):
547 547 """Recursively yield all children of the changeset.
548 548
549 549 For just the immediate children, use children()
550 550 """
551 551 for d in self._repo.changelog.descendants([self._rev]):
552 552 yield changectx(self._repo, d)
553 553
554 554 def filectx(self, path, fileid=None, filelog=None):
555 555 """get a file context from this changeset"""
556 556 if fileid is None:
557 557 fileid = self.filenode(path)
558 558 return filectx(self._repo, path, fileid=fileid,
559 559 changectx=self, filelog=filelog)
560 560
561 561 def ancestor(self, c2, warn=False):
562 562 """return the "best" ancestor context of self and c2
563 563
564 564 If there are multiple candidates, it will show a message and check
565 565 merge.preferancestor configuration before falling back to the
566 566 revlog ancestor."""
567 567 # deal with workingctxs
568 568 n2 = c2._node
569 569 if n2 is None:
570 570 n2 = c2._parents[0]._node
571 571 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
572 572 if not cahs:
573 573 anc = nullid
574 574 elif len(cahs) == 1:
575 575 anc = cahs[0]
576 576 else:
577 577 # experimental config: merge.preferancestor
578 578 for r in self._repo.ui.configlist('merge', 'preferancestor'):
579 579 try:
580 580 ctx = scmutil.revsymbol(self._repo, r)
581 581 except error.RepoLookupError:
582 582 continue
583 583 anc = ctx.node()
584 584 if anc in cahs:
585 585 break
586 586 else:
587 587 anc = self._repo.changelog.ancestor(self._node, n2)
588 588 if warn:
589 589 self._repo.ui.status(
590 590 (_("note: using %s as ancestor of %s and %s\n") %
591 591 (short(anc), short(self._node), short(n2))) +
592 592 ''.join(_(" alternatively, use --config "
593 593 "merge.preferancestor=%s\n") %
594 594 short(n) for n in sorted(cahs) if n != anc))
595 595 return changectx(self._repo, anc)
596 596
597 597 def isancestorof(self, other):
598 598 """True if this changeset is an ancestor of other"""
599 599 return self._repo.changelog.isancestorrev(self._rev, other._rev)
600 600
601 601 def walk(self, match):
602 602 '''Generates matching file names.'''
603 603
604 604 # Wrap match.bad method to have message with nodeid
605 605 def bad(fn, msg):
606 606 # The manifest doesn't know about subrepos, so don't complain about
607 607 # paths into valid subrepos.
608 608 if any(fn == s or fn.startswith(s + '/')
609 609 for s in self.substate):
610 610 return
611 611 match.bad(fn, _('no such file in rev %s') % self)
612 612
613 613 m = matchmod.badmatch(match, bad)
614 614 return self._manifest.walk(m)
615 615
616 616 def matches(self, match):
617 617 return self.walk(match)
618 618
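A short sketch of the read-only accessors changectx provides, reusing repo and ui from the basectx sketch; the file path queried at the end is an assumption:

    ctx = repo[b'.']
    ui.write(b'%s by %s\n' % (ctx.hex()[:12], ctx.user()))
    ui.write(b'branch: %s\n' % ctx.branch())
    for f in ctx.files():                  # files touched by this changeset
        ui.write(b'  %s\n' % f)
    fctx = ctx[b'mercurial/context.py']    # __getitem__ -> filectx (assumed path)
    data = fctx.data()                     # file contents at that revision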
619 619 class basefilectx(object):
620 620 """A filecontext object represents the common logic for its children:
621 621 filectx: read-only access to a filerevision that is already present
622 622 in the repo,
623 623 workingfilectx: a filecontext that represents files from the working
624 624 directory,
625 625 memfilectx: a filecontext that represents files in-memory,
626 626 overlayfilectx: duplicate another filecontext with some fields overridden.
627 627 """
628 628 @propertycache
629 629 def _filelog(self):
630 630 return self._repo.file(self._path)
631 631
632 632 @propertycache
633 633 def _changeid(self):
634 634 if r'_changeid' in self.__dict__:
635 635 return self._changeid
636 636 elif r'_changectx' in self.__dict__:
637 637 return self._changectx.rev()
638 638 elif r'_descendantrev' in self.__dict__:
639 639 # this file context was created from a revision with a known
640 640 # descendant, we can (lazily) correct for linkrev aliases
641 641 return self._adjustlinkrev(self._descendantrev)
642 642 else:
643 643 return self._filelog.linkrev(self._filerev)
644 644
645 645 @propertycache
646 646 def _filenode(self):
647 647 if r'_fileid' in self.__dict__:
648 648 return self._filelog.lookup(self._fileid)
649 649 else:
650 650 return self._changectx.filenode(self._path)
651 651
652 652 @propertycache
653 653 def _filerev(self):
654 654 return self._filelog.rev(self._filenode)
655 655
656 656 @propertycache
657 657 def _repopath(self):
658 658 return self._path
659 659
660 660 def __nonzero__(self):
661 661 try:
662 662 self._filenode
663 663 return True
664 664 except error.LookupError:
665 665 # file is missing
666 666 return False
667 667
668 668 __bool__ = __nonzero__
669 669
670 670 def __bytes__(self):
671 671 try:
672 672 return "%s@%s" % (self.path(), self._changectx)
673 673 except error.LookupError:
674 674 return "%s@???" % self.path()
675 675
676 676 __str__ = encoding.strmethod(__bytes__)
677 677
678 678 def __repr__(self):
679 679 return r"<%s %s>" % (type(self).__name__, str(self))
680 680
681 681 def __hash__(self):
682 682 try:
683 683 return hash((self._path, self._filenode))
684 684 except AttributeError:
685 685 return id(self)
686 686
687 687 def __eq__(self, other):
688 688 try:
689 689 return (type(self) == type(other) and self._path == other._path
690 690 and self._filenode == other._filenode)
691 691 except AttributeError:
692 692 return False
693 693
694 694 def __ne__(self, other):
695 695 return not (self == other)
696 696
697 697 def filerev(self):
698 698 return self._filerev
699 699 def filenode(self):
700 700 return self._filenode
701 701 @propertycache
702 702 def _flags(self):
703 703 return self._changectx.flags(self._path)
704 704 def flags(self):
705 705 return self._flags
706 706 def filelog(self):
707 707 return self._filelog
708 708 def rev(self):
709 709 return self._changeid
710 710 def linkrev(self):
711 711 return self._filelog.linkrev(self._filerev)
712 712 def node(self):
713 713 return self._changectx.node()
714 714 def hex(self):
715 715 return self._changectx.hex()
716 716 def user(self):
717 717 return self._changectx.user()
718 718 def date(self):
719 719 return self._changectx.date()
720 720 def files(self):
721 721 return self._changectx.files()
722 722 def description(self):
723 723 return self._changectx.description()
724 724 def branch(self):
725 725 return self._changectx.branch()
726 726 def extra(self):
727 727 return self._changectx.extra()
728 728 def phase(self):
729 729 return self._changectx.phase()
730 730 def phasestr(self):
731 731 return self._changectx.phasestr()
732 732 def obsolete(self):
733 733 return self._changectx.obsolete()
734 734 def instabilities(self):
735 735 return self._changectx.instabilities()
736 736 def manifest(self):
737 737 return self._changectx.manifest()
738 738 def changectx(self):
739 739 return self._changectx
740 740 def renamed(self):
741 741 return self._copied
742 742 def repo(self):
743 743 return self._repo
744 744 def size(self):
745 745 return len(self.data())
746 746
747 747 def path(self):
748 748 return self._path
749 749
750 750 def isbinary(self):
751 751 try:
752 752 return stringutil.binary(self.data())
753 753 except IOError:
754 754 return False
755 755 def isexec(self):
756 756 return 'x' in self.flags()
757 757 def islink(self):
758 758 return 'l' in self.flags()
759 759
760 760 def isabsent(self):
761 761 """whether this filectx represents a file not in self._changectx
762 762
763 763 This is mainly for merge code to detect change/delete conflicts. This is
764 764 expected to be True for all subclasses of basectx."""
765 765 return False
766 766
767 767 _customcmp = False
768 768 def cmp(self, fctx):
769 769 """compare with other file context
770 770
771 771 returns True if different than fctx.
772 772 """
773 773 if fctx._customcmp:
774 774 return fctx.cmp(self)
775 775
776 776 if (fctx._filenode is None
777 777 and (self._repo._encodefilterpats
778 778 # if file data starts with '\1\n', an empty metadata block is
779 779 # prepended, which adds 4 bytes to filelog.size().
780 780 or self.size() - 4 == fctx.size())
781 781 or self.size() == fctx.size()):
782 782 return self._filelog.cmp(self._filenode, fctx.data())
783 783
784 784 return True
785 785
786 786 def _adjustlinkrev(self, srcrev, inclusive=False):
787 787 """return the first ancestor of <srcrev> introducing <fnode>
788 788
789 789 If the linkrev of the file revision does not point to an ancestor of
790 790 srcrev, we'll walk down the ancestors until we find one introducing
791 791 this file revision.
792 792
793 793 :srcrev: the changeset revision we search ancestors from
794 794 :inclusive: if true, the src revision will also be checked
795 795 """
796 796 repo = self._repo
797 797 cl = repo.unfiltered().changelog
798 798 mfl = repo.manifestlog
799 799 # fetch the linkrev
800 800 lkr = self.linkrev()
801 801 # hack to reuse ancestor computation when searching for renames
802 802 memberanc = getattr(self, '_ancestrycontext', None)
803 803 iteranc = None
804 804 if srcrev is None:
805 805 # wctx case, used by workingfilectx during mergecopy
806 806 revs = [p.rev() for p in self._repo[None].parents()]
807 807 inclusive = True # we skipped the real (revless) source
808 808 else:
809 809 revs = [srcrev]
810 810 if memberanc is None:
811 811 memberanc = iteranc = cl.ancestors(revs, lkr,
812 812 inclusive=inclusive)
813 813 # check if this linkrev is an ancestor of srcrev
814 814 if lkr not in memberanc:
815 815 if iteranc is None:
816 816 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
817 817 fnode = self._filenode
818 818 path = self._path
819 819 for a in iteranc:
820 820 ac = cl.read(a) # get changeset data (we avoid object creation)
821 821 if path in ac[3]: # checking the 'files' field.
822 822 # The file has been touched, check if the content is
823 823 # similar to the one we search for.
824 824 if fnode == mfl[ac[0]].readfast().get(path):
825 825 return a
826 826 # In theory, we should never get out of that loop without a result.
827 827 # But if the manifest uses a buggy file revision (not a child of the
828 828 # one it replaces) we could. Such a buggy situation will likely
829 829 # result in a crash somewhere else at some point.
830 830 return lkr
831 831
832 832 def introrev(self):
833 833 """return the rev of the changeset which introduced this file revision
834 834
835 835 This method is different from linkrev because it takes into account the
836 836 changeset the filectx was created from. It ensures the returned
837 837 revision is one of its ancestors. This prevents bugs from
838 838 'linkrev-shadowing' when a file revision is used by multiple
839 839 changesets.
840 840 """
841 841 lkr = self.linkrev()
842 842 attrs = vars(self)
843 843 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
844 844 if noctx or self.rev() == lkr:
845 845 return self.linkrev()
846 846 return self._adjustlinkrev(self.rev(), inclusive=True)
847 847
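To make the linkrev()/introrev() distinction above concrete, a small sketch (repo and ui as before; the tracked path is an assumption):

    fctx = repo[b'tip'][b'mercurial/context.py']
    ui.write(b'linkrev:  %d\n' % fctx.linkrev())   # revision recorded in the filelog
    ui.write(b'introrev: %d\n' % fctx.introrev())  # introducing revision among ancestors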
848 848 def introfilectx(self):
849 849 """Return filectx having identical contents, but pointing to the
850 850 changeset revision where this filectx was introduced"""
851 851 introrev = self.introrev()
852 852 if self.rev() == introrev:
853 853 return self
854 854 return self.filectx(self.filenode(), changeid=introrev)
855 855
856 856 def _parentfilectx(self, path, fileid, filelog):
857 857 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
858 858 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
859 859 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
860 860 # If self is associated with a changeset (probably explicitly
861 861 # fed), ensure the created filectx is associated with a
862 862 # changeset that is an ancestor of self.changectx.
863 863 # This lets us later use _adjustlinkrev to get a correct link.
864 864 fctx._descendantrev = self.rev()
865 865 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
866 866 elif r'_descendantrev' in vars(self):
867 867 # Otherwise propagate _descendantrev if we have one associated.
868 868 fctx._descendantrev = self._descendantrev
869 869 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
870 870 return fctx
871 871
872 872 def parents(self):
873 873 _path = self._path
874 874 fl = self._filelog
875 875 parents = self._filelog.parents(self._filenode)
876 876 pl = [(_path, node, fl) for node in parents if node != nullid]
877 877
878 878 r = fl.renamed(self._filenode)
879 879 if r:
880 880 # - In the simple rename case, both parents are nullid, pl is empty.
881 881 # - In case of merge, only one of the parents is nullid and should
882 882 # be replaced with the rename information. This parent is -always-
883 883 # the first one.
884 884 #
885 885 # As nullid parents have always been filtered out in the previous list
886 886 # comprehension, inserting at 0 will always result in replacing the
887 887 # first nullid parent with the rename information.
888 888 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
889 889
890 890 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
891 891
892 892 def p1(self):
893 893 return self.parents()[0]
894 894
895 895 def p2(self):
896 896 p = self.parents()
897 897 if len(p) == 2:
898 898 return p[1]
899 899 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
900 900
901 901 def annotate(self, follow=False, skiprevs=None, diffopts=None):
902 902 """Returns a list of annotateline objects for each line in the file
903 903
904 904 - line.fctx is the filectx of the node where that line was last changed
905 905 - line.lineno is the line number at the first appearance in the managed
906 906 file
907 907 - line.text is the data on that line (including newline character)
908 908 """
909 909 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
910 910
911 911 def parents(f):
912 912 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
913 913 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
914 914 # from the topmost introrev (= srcrev) down to p.linkrev() if it
915 915 # isn't an ancestor of the srcrev.
916 916 f._changeid
917 917 pl = f.parents()
918 918
919 919 # Don't return renamed parents if we aren't following.
920 920 if not follow:
921 921 pl = [p for p in pl if p.path() == f.path()]
922 922
923 923 # renamed filectx won't have a filelog yet, so set it
924 924 # from the cache to save time
925 925 for p in pl:
926 926 if not r'_filelog' in p.__dict__:
927 927 p._filelog = getlog(p.path())
928 928
929 929 return pl
930 930
931 931 # use linkrev to find the first changeset where self appeared
932 932 base = self.introfilectx()
933 933 if getattr(base, '_ancestrycontext', None) is None:
934 934 cl = self._repo.changelog
935 935 if base.rev() is None:
936 936 # wctx is not inclusive, but works because _ancestrycontext
937 937 # is used to test filelog revisions
938 938 ac = cl.ancestors([p.rev() for p in base.parents()],
939 939 inclusive=True)
940 940 else:
941 941 ac = cl.ancestors([base.rev()], inclusive=True)
942 942 base._ancestrycontext = ac
943 943
944 944 return dagop.annotate(base, parents, skiprevs=skiprevs,
945 945 diffopts=diffopts)
946 946
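A usage sketch for annotate(), relying only on the annotateline fields documented above (fctx, lineno, text); repo and ui as before, and the annotated path is an assumption:

    fctx = repo[b'tip'][b'README']
    for line in fctx.annotate(follow=True):
        # line.text already ends with a newline
        ui.write(b'%5d %4d: %s' % (line.fctx.rev(), line.lineno, line.text))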
947 947 def ancestors(self, followfirst=False):
948 948 visit = {}
949 949 c = self
950 950 if followfirst:
951 951 cut = 1
952 952 else:
953 953 cut = None
954 954
955 955 while True:
956 956 for parent in c.parents()[:cut]:
957 957 visit[(parent.linkrev(), parent.filenode())] = parent
958 958 if not visit:
959 959 break
960 960 c = visit.pop(max(visit))
961 961 yield c
962 962
963 963 def decodeddata(self):
964 964 """Returns `data()` after running repository decoding filters.
965 965
966 966 This is often equivalent to how the data would be expressed on disk.
967 967 """
968 968 return self._repo.wwritedata(self.path(), self.data())
969 969
970 970 class filectx(basefilectx):
971 971 """A filecontext object makes access to data related to a particular
972 972 filerevision convenient."""
973 973 def __init__(self, repo, path, changeid=None, fileid=None,
974 974 filelog=None, changectx=None):
975 975 """changeid can be a changeset revision, node, or tag.
976 976 fileid can be a file revision or node."""
977 977 self._repo = repo
978 978 self._path = path
979 979
980 980 assert (changeid is not None
981 981 or fileid is not None
982 982 or changectx is not None), \
983 983 ("bad args: changeid=%r, fileid=%r, changectx=%r"
984 984 % (changeid, fileid, changectx))
985 985
986 986 if filelog is not None:
987 987 self._filelog = filelog
988 988
989 989 if changeid is not None:
990 990 self._changeid = changeid
991 991 if changectx is not None:
992 992 self._changectx = changectx
993 993 if fileid is not None:
994 994 self._fileid = fileid
995 995
996 996 @propertycache
997 997 def _changectx(self):
998 998 try:
999 999 return changectx(self._repo, self._changeid)
1000 1000 except error.FilteredRepoLookupError:
1001 1001 # Linkrev may point to any revision in the repository. When the
1002 1002 # repository is filtered this may lead to `filectx` trying to build
1003 1003 # `changectx` for a filtered revision. In such a case we fall back to
1004 1004 # creating `changectx` on the unfiltered version of the repository.
1005 1005 # This fallback should not be an issue because `changectx` from
1006 1006 # `filectx` are not used in complex operations that care about
1007 1007 # filtering.
1008 1008 #
1009 1009 # This fallback is a cheap and dirty fix that prevents several
1010 1010 # crashes. It does not ensure the behavior is correct. However the
1011 1011 # behavior was not correct before filtering either, and "incorrect
1012 1012 # behavior" is seen as better than "crash".
1013 1013 #
1014 1014 # Linkrevs have several serious troubles with filtering that are
1015 1015 # complicated to solve. Proper handling of the issue here should be
1016 1016 # considered when solving the linkrev issue is on the table.
1017 1017 return changectx(self._repo.unfiltered(), self._changeid)
1018 1018
1019 1019 def filectx(self, fileid, changeid=None):
1020 1020 '''opens an arbitrary revision of the file without
1021 1021 opening a new filelog'''
1022 1022 return filectx(self._repo, self._path, fileid=fileid,
1023 1023 filelog=self._filelog, changeid=changeid)
1024 1024
1025 1025 def rawdata(self):
1026 1026 return self._filelog.revision(self._filenode, raw=True)
1027 1027
1028 1028 def rawflags(self):
1029 1029 """low-level revlog flags"""
1030 1030 return self._filelog.flags(self._filerev)
1031 1031
1032 1032 def data(self):
1033 1033 try:
1034 1034 return self._filelog.read(self._filenode)
1035 1035 except error.CensoredNodeError:
1036 1036 if self._repo.ui.config("censor", "policy") == "ignore":
1037 1037 return ""
1038 1038 raise error.Abort(_("censored node: %s") % short(self._filenode),
1039 1039 hint=_("set censor.policy to ignore errors"))
1040 1040
1041 1041 def size(self):
1042 1042 return self._filelog.size(self._filerev)
1043 1043
1044 1044 @propertycache
1045 1045 def _copied(self):
1046 1046 """check if file was actually renamed in this changeset revision
1047 1047
1048 1048 If a rename is logged in the file revision, we report the copy for the
1049 1049 changeset only if the file revision's linkrev points back to the changeset
1050 1050 in question or both changeset parents contain different file revisions.
1051 1051 """
1052 1052
1053 1053 renamed = self._filelog.renamed(self._filenode)
1054 1054 if not renamed:
1055 1055 return None
1056 1056
1057 1057 if self.rev() == self.linkrev():
1058 1058 return renamed
1059 1059
1060 1060 name = self.path()
1061 1061 fnode = self._filenode
1062 1062 for p in self._changectx.parents():
1063 1063 try:
1064 1064 if fnode == p.filenode(name):
1065 1065 return None
1066 1066 except error.LookupError:
1067 1067 pass
1068 1068 return renamed
1069 1069
1070 1070 def children(self):
1071 1071 # hard for renames
1072 1072 c = self._filelog.children(self._filenode)
1073 1073 return [filectx(self._repo, self._path, fileid=x,
1074 1074 filelog=self._filelog) for x in c]
1075 1075
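The filectx constructor above can also be used directly when no changectx is at hand; a hedged sketch (path and revisions are assumptions, repo and ui as before):

    from mercurial import context

    fctx = context.filectx(repo, b'mercurial/context.py', changeid=b'tip')
    ui.write(b'%d bytes, flags=%s\n' % (fctx.size(), fctx.flags()))
    first = fctx.filectx(0)    # file revision 0 of the same file, same filelog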
1076 1076 class committablectx(basectx):
1077 1077 """A committablectx object provides common functionality for a context that
1078 1078 wants the ability to commit, e.g. workingctx or memctx."""
1079 1079 def __init__(self, repo, text="", user=None, date=None, extra=None,
1080 1080 changes=None):
1081 1081 super(committablectx, self).__init__(repo)
1082 1082 self._rev = None
1083 1083 self._node = None
1084 1084 self._text = text
1085 1085 if date:
1086 1086 self._date = dateutil.parsedate(date)
1087 1087 if user:
1088 1088 self._user = user
1089 1089 if changes:
1090 1090 self._status = changes
1091 1091
1092 1092 self._extra = {}
1093 1093 if extra:
1094 1094 self._extra = extra.copy()
1095 1095 if 'branch' not in self._extra:
1096 1096 try:
1097 1097 branch = encoding.fromlocal(self._repo.dirstate.branch())
1098 1098 except UnicodeDecodeError:
1099 1099 raise error.Abort(_('branch name not in UTF-8!'))
1100 1100 self._extra['branch'] = branch
1101 1101 if self._extra['branch'] == '':
1102 1102 self._extra['branch'] = 'default'
1103 1103
1104 1104 def __bytes__(self):
1105 1105 return bytes(self._parents[0]) + "+"
1106 1106
1107 1107 __str__ = encoding.strmethod(__bytes__)
1108 1108
1109 1109 def __nonzero__(self):
1110 1110 return True
1111 1111
1112 1112 __bool__ = __nonzero__
1113 1113
1114 1114 def _buildflagfunc(self):
1115 1115 # Create a fallback function for getting file flags when the
1116 1116 # filesystem doesn't support them
1117 1117
1118 1118 copiesget = self._repo.dirstate.copies().get
1119 1119 parents = self.parents()
1120 1120 if len(parents) < 2:
1121 1121 # when we have one parent, it's easy: copy from parent
1122 1122 man = parents[0].manifest()
1123 1123 def func(f):
1124 1124 f = copiesget(f, f)
1125 1125 return man.flags(f)
1126 1126 else:
1127 1127 # merges are tricky: we try to reconstruct the unstored
1128 1128 # result from the merge (issue1802)
1129 1129 p1, p2 = parents
1130 1130 pa = p1.ancestor(p2)
1131 1131 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1132 1132
1133 1133 def func(f):
1134 1134 f = copiesget(f, f) # may be wrong for merges with copies
1135 1135 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1136 1136 if fl1 == fl2:
1137 1137 return fl1
1138 1138 if fl1 == fla:
1139 1139 return fl2
1140 1140 if fl2 == fla:
1141 1141 return fl1
1142 1142 return '' # punt for conflicts
1143 1143
1144 1144 return func
1145 1145
1146 1146 @propertycache
1147 1147 def _flagfunc(self):
1148 1148 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1149 1149
1150 1150 @propertycache
1151 1151 def _status(self):
1152 1152 return self._repo.status()
1153 1153
1154 1154 @propertycache
1155 1155 def _user(self):
1156 1156 return self._repo.ui.username()
1157 1157
1158 1158 @propertycache
1159 1159 def _date(self):
1160 1160 ui = self._repo.ui
1161 1161 date = ui.configdate('devel', 'default-date')
1162 1162 if date is None:
1163 1163 date = dateutil.makedate()
1164 1164 return date
1165 1165
1166 1166 def subrev(self, subpath):
1167 1167 return None
1168 1168
1169 1169 def manifestnode(self):
1170 1170 return None
1171 1171 def user(self):
1172 1172 return self._user or self._repo.ui.username()
1173 1173 def date(self):
1174 1174 return self._date
1175 1175 def description(self):
1176 1176 return self._text
1177 1177 def files(self):
1178 1178 return sorted(self._status.modified + self._status.added +
1179 1179 self._status.removed)
1180 1180
1181 1181 def modified(self):
1182 1182 return self._status.modified
1183 1183 def added(self):
1184 1184 return self._status.added
1185 1185 def removed(self):
1186 1186 return self._status.removed
1187 1187 def deleted(self):
1188 1188 return self._status.deleted
1189 1189 def branch(self):
1190 1190 return encoding.tolocal(self._extra['branch'])
1191 1191 def closesbranch(self):
1192 1192 return 'close' in self._extra
1193 1193 def extra(self):
1194 1194 return self._extra
1195 1195
1196 1196 def isinmemory(self):
1197 1197 return False
1198 1198
1199 1199 def tags(self):
1200 1200 return []
1201 1201
1202 1202 def bookmarks(self):
1203 1203 b = []
1204 1204 for p in self.parents():
1205 1205 b.extend(p.bookmarks())
1206 1206 return b
1207 1207
1208 1208 def phase(self):
1209 1209 phase = phases.draft # default phase to draft
1210 1210 for p in self.parents():
1211 1211 phase = max(phase, p.phase())
1212 1212 return phase
1213 1213
1214 1214 def hidden(self):
1215 1215 return False
1216 1216
1217 1217 def children(self):
1218 1218 return []
1219 1219
1220 1220 def flags(self, path):
1221 1221 if r'_manifest' in self.__dict__:
1222 1222 try:
1223 1223 return self._manifest.flags(path)
1224 1224 except KeyError:
1225 1225 return ''
1226 1226
1227 1227 try:
1228 1228 return self._flagfunc(path)
1229 1229 except OSError:
1230 1230 return ''
1231 1231
1232 1232 def ancestor(self, c2):
1233 1233 """return the "best" ancestor context of self and c2"""
1234 1234 return self._parents[0].ancestor(c2) # punt on two parents for now
1235 1235
1236 1236 def walk(self, match):
1237 1237 '''Generates matching file names.'''
1238 1238 return sorted(self._repo.dirstate.walk(match,
1239 1239 subrepos=sorted(self.substate),
1240 1240 unknown=True, ignored=False))
1241 1241
1242 1242 def matches(self, match):
1243 1243 ds = self._repo.dirstate
1244 1244 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1245 1245
1246 1246 def ancestors(self):
1247 1247 for p in self._parents:
1248 1248 yield p
1249 1249 for a in self._repo.changelog.ancestors(
1250 1250 [p.rev() for p in self._parents]):
1251 1251 yield changectx(self._repo, a)
1252 1252
1253 1253 def markcommitted(self, node):
1254 1254 """Perform post-commit cleanup necessary after committing this ctx
1255 1255
1256 1256 Specifically, this updates backing stores this working context
1257 1257 wraps to reflect the fact that the changes reflected by this
1258 1258 workingctx have been committed. For example, it marks
1259 1259 modified and added files as normal in the dirstate.
1260 1260
1261 1261 """
1262 1262
1263 1263 with self._repo.dirstate.parentchange():
1264 1264 for f in self.modified() + self.added():
1265 1265 self._repo.dirstate.normal(f)
1266 1266 for f in self.removed():
1267 1267 self._repo.dirstate.drop(f)
1268 1268 self._repo.dirstate.setparents(node)
1269 1269
1270 1270 # write changes out explicitly, because nesting wlock at
1271 1271 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1272 1272 # from immediately doing so for subsequent changing files
1273 1273 self._repo.dirstate.write(self._repo.currenttransaction())
1274 1274
1275 1275 def dirty(self, missing=False, merge=True, branch=True):
1276 1276 return False
1277 1277
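To illustrate the committablectx defaults above (phase is never lower than draft or a parent's phase, branch comes from the dirstate), a small sketch using the working context; repo and ui as before:

    from mercurial import phases

    wctx = repo[None]                      # workingctx, a committablectx subclass
    assert wctx.phase() >= phases.draft    # the draft floor computed above
    ui.write(b'would commit on branch %s\n' % wctx.branch())
    ui.write(b'%d file(s) would be committed\n' % len(wctx.files()))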
1278 1278 class workingctx(committablectx):
1279 1279 """A workingctx object makes access to data related to
1280 1280 the current working directory convenient.
1281 1281 date - any valid date string or (unixtime, offset), or None.
1282 1282 user - username string, or None.
1283 1283 extra - a dictionary of extra values, or None.
1284 1284 changes - a list of file lists as returned by localrepo.status()
1285 1285 or None to use the repository status.
1286 1286 """
1287 1287 def __init__(self, repo, text="", user=None, date=None, extra=None,
1288 1288 changes=None):
1289 1289 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1290 1290
1291 1291 def __iter__(self):
1292 1292 d = self._repo.dirstate
1293 1293 for f in d:
1294 1294 if d[f] != 'r':
1295 1295 yield f
1296 1296
1297 1297 def __contains__(self, key):
1298 1298 return self._repo.dirstate[key] not in "?r"
1299 1299
1300 1300 def hex(self):
1301 1301 return hex(wdirid)
1302 1302
1303 1303 @propertycache
1304 1304 def _parents(self):
1305 1305 p = self._repo.dirstate.parents()
1306 1306 if p[1] == nullid:
1307 1307 p = p[:-1]
1308 1308 return [changectx(self._repo, x) for x in p]
1309 1309
1310 1310 def _fileinfo(self, path):
1311 1311 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1312 1312 self._manifest
1313 1313 return super(workingctx, self)._fileinfo(path)
1314 1314
1315 1315 def filectx(self, path, filelog=None):
1316 1316 """get a file context from the working directory"""
1317 1317 return workingfilectx(self._repo, path, workingctx=self,
1318 1318 filelog=filelog)
1319 1319
1320 1320 def dirty(self, missing=False, merge=True, branch=True):
1321 1321 "check whether a working directory is modified"
1322 1322 # check subrepos first
1323 1323 for s in sorted(self.substate):
1324 1324 if self.sub(s).dirty(missing=missing):
1325 1325 return True
1326 1326 # check current working dir
1327 1327 return ((merge and self.p2()) or
1328 1328 (branch and self.branch() != self.p1().branch()) or
1329 1329 self.modified() or self.added() or self.removed() or
1330 1330 (missing and self.deleted()))
1331 1331
1332 1332 def add(self, list, prefix=""):
1333 1333 with self._repo.wlock():
1334 1334 ui, ds = self._repo.ui, self._repo.dirstate
1335 1335 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1336 1336 rejected = []
1337 1337 lstat = self._repo.wvfs.lstat
1338 1338 for f in list:
1339 1339 # ds.pathto() returns an absolute path when this is invoked from
1340 1340 # the keyword extension. That gets flagged as non-portable on
1341 1341 # Windows, since it contains the drive letter and colon.
1342 1342 scmutil.checkportable(ui, os.path.join(prefix, f))
1343 1343 try:
1344 1344 st = lstat(f)
1345 1345 except OSError:
1346 1346 ui.warn(_("%s does not exist!\n") % uipath(f))
1347 1347 rejected.append(f)
1348 1348 continue
1349 1349 limit = ui.configbytes('ui', 'large-file-limit')
1350 1350 if limit != 0 and st.st_size > limit:
1351 1351 ui.warn(_("%s: up to %d MB of RAM may be required "
1352 1352 "to manage this file\n"
1353 1353 "(use 'hg revert %s' to cancel the "
1354 1354 "pending addition)\n")
1355 1355 % (f, 3 * st.st_size // 1000000, uipath(f)))
1356 1356 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1357 1357 ui.warn(_("%s not added: only files and symlinks "
1358 1358 "supported currently\n") % uipath(f))
1359 1359 rejected.append(f)
1360 1360 elif ds[f] in 'amn':
1361 1361 ui.warn(_("%s already tracked!\n") % uipath(f))
1362 1362 elif ds[f] == 'r':
1363 1363 ds.normallookup(f)
1364 1364 else:
1365 1365 ds.add(f)
1366 1366 return rejected
1367 1367
1368 1368 def forget(self, files, prefix=""):
1369 1369 with self._repo.wlock():
1370 1370 ds = self._repo.dirstate
1371 1371 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1372 1372 rejected = []
1373 1373 for f in files:
1374 1374 if f not in self._repo.dirstate:
1375 1375 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1376 1376 rejected.append(f)
1377 1377 elif self._repo.dirstate[f] != 'a':
1378 1378 self._repo.dirstate.remove(f)
1379 1379 else:
1380 1380 self._repo.dirstate.drop(f)
1381 1381 return rejected
1382 1382
1383 1383 def undelete(self, list):
1384 1384 pctxs = self.parents()
1385 1385 with self._repo.wlock():
1386 1386 ds = self._repo.dirstate
1387 1387 for f in list:
1388 1388 if self._repo.dirstate[f] != 'r':
1389 1389 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1390 1390 else:
1391 1391 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1392 1392 t = fctx.data()
1393 1393 self._repo.wwrite(f, t, fctx.flags())
1394 1394 self._repo.dirstate.normal(f)
1395 1395
1396 1396 def copy(self, source, dest):
1397 1397 try:
1398 1398 st = self._repo.wvfs.lstat(dest)
1399 1399 except OSError as err:
1400 1400 if err.errno != errno.ENOENT:
1401 1401 raise
1402 1402 self._repo.ui.warn(_("%s does not exist!\n")
1403 1403 % self._repo.dirstate.pathto(dest))
1404 1404 return
1405 1405 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1406 1406 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1407 1407 "symbolic link\n")
1408 1408 % self._repo.dirstate.pathto(dest))
1409 1409 else:
1410 1410 with self._repo.wlock():
1411 1411 if self._repo.dirstate[dest] in '?':
1412 1412 self._repo.dirstate.add(dest)
1413 1413 elif self._repo.dirstate[dest] in 'r':
1414 1414 self._repo.dirstate.normallookup(dest)
1415 1415 self._repo.dirstate.copy(source, dest)
1416 1416
1417 1417 def match(self, pats=None, include=None, exclude=None, default='glob',
1418 1418 listsubrepos=False, badfn=None):
1419 1419 r = self._repo
1420 1420
1421 1421 # Only a case insensitive filesystem needs magic to translate user input
1422 1422 # to actual case in the filesystem.
1423 1423 icasefs = not util.fscasesensitive(r.root)
1424 1424 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1425 1425 default, auditor=r.auditor, ctx=self,
1426 1426 listsubrepos=listsubrepos, badfn=badfn,
1427 1427 icasefs=icasefs)
1428 1428
1429 1429 def _filtersuspectsymlink(self, files):
1430 1430 if not files or self._repo.dirstate._checklink:
1431 1431 return files
1432 1432
1433 1433 # Symlink placeholders may get non-symlink-like contents
1434 1434 # via user error or dereferencing by NFS or Samba servers,
1435 1435 # so we filter out any placeholders that don't look like a
1436 1436 # symlink
1437 1437 sane = []
1438 1438 for f in files:
1439 1439 if self.flags(f) == 'l':
1440 1440 d = self[f].data()
1441 1441 if (d == '' or len(d) >= 1024 or '\n' in d
1442 1442 or stringutil.binary(d)):
1443 1443 self._repo.ui.debug('ignoring suspect symlink placeholder'
1444 1444 ' "%s"\n' % f)
1445 1445 continue
1446 1446 sane.append(f)
1447 1447 return sane
1448 1448
1449 1449 def _checklookup(self, files):
1450 1450 # check for any possibly clean files
1451 1451 if not files:
1452 1452 return [], [], []
1453 1453
1454 1454 modified = []
1455 1455 deleted = []
1456 1456 fixup = []
1457 1457 pctx = self._parents[0]
1458 1458 # do a full compare of any files that might have changed
1459 1459 for f in sorted(files):
1460 1460 try:
1461 1461 # This will return True for a file that got replaced by a
1462 1462 # directory in the interim, but fixing that is pretty hard.
1463 1463 if (f not in pctx or self.flags(f) != pctx.flags(f)
1464 1464 or pctx[f].cmp(self[f])):
1465 1465 modified.append(f)
1466 1466 else:
1467 1467 fixup.append(f)
1468 1468 except (IOError, OSError):
1469 1469 # A file became inaccessible in between? Mark it as deleted,
1470 1470 # matching dirstate behavior (issue5584).
1471 1471 # The dirstate has more complex behavior around whether a
1472 1472 # missing file matches a directory, etc, but we don't need to
1473 1473 # bother with that: if f has made it to this point, we're sure
1474 1474 # it's in the dirstate.
1475 1475 deleted.append(f)
1476 1476
1477 1477 return modified, deleted, fixup
1478 1478
1479 1479 def _poststatusfixup(self, status, fixup):
1480 1480 """update dirstate for files that are actually clean"""
1481 1481 poststatus = self._repo.postdsstatus()
1482 1482 if fixup or poststatus:
1483 1483 try:
1484 1484 oldid = self._repo.dirstate.identity()
1485 1485
1486 1486 # updating the dirstate is optional
1487 1487 # so we don't wait on the lock
1488 1488 # wlock can invalidate the dirstate, so cache normal _after_
1489 1489 # taking the lock
1490 1490 with self._repo.wlock(False):
1491 1491 if self._repo.dirstate.identity() == oldid:
1492 1492 if fixup:
1493 1493 normal = self._repo.dirstate.normal
1494 1494 for f in fixup:
1495 1495 normal(f)
1496 1496 # write changes out explicitly, because nesting
1497 1497 # wlock at runtime may prevent 'wlock.release()'
1498 1498 # after this block from doing so for subsequent
1499 1499 # changing files
1500 1500 tr = self._repo.currenttransaction()
1501 1501 self._repo.dirstate.write(tr)
1502 1502
1503 1503 if poststatus:
1504 1504 for ps in poststatus:
1505 1505 ps(self, status)
1506 1506 else:
1507 1507 # in this case, writing changes out breaks
1508 1508 # consistency, because .hg/dirstate was
1509 1509 # already changed simultaneously after the last
1510 1510 # caching (see also issue5584 for details)
1511 1511 self._repo.ui.debug('skip updating dirstate: '
1512 1512 'identity mismatch\n')
1513 1513 except error.LockError:
1514 1514 pass
1515 1515 finally:
1516 1516 # Even if the wlock couldn't be grabbed, clear out the list.
1517 1517 self._repo.clearpostdsstatus()
1518 1518
1519 1519 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1520 1520 '''Gets the status from the dirstate -- internal use only.'''
1521 1521 subrepos = []
1522 1522 if '.hgsub' in self:
1523 1523 subrepos = sorted(self.substate)
1524 1524 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1525 1525 clean=clean, unknown=unknown)
1526 1526
1527 1527 # check for any possibly clean files
1528 1528 fixup = []
1529 1529 if cmp:
1530 1530 modified2, deleted2, fixup = self._checklookup(cmp)
1531 1531 s.modified.extend(modified2)
1532 1532 s.deleted.extend(deleted2)
1533 1533
1534 1534 if fixup and clean:
1535 1535 s.clean.extend(fixup)
1536 1536
1537 1537 self._poststatusfixup(s, fixup)
1538 1538
1539 1539 if match.always():
1540 1540 # cache for performance
1541 1541 if s.unknown or s.ignored or s.clean:
1542 1542 # "_status" is cached with list*=False in the normal route
1543 1543 self._status = scmutil.status(s.modified, s.added, s.removed,
1544 1544 s.deleted, [], [], [])
1545 1545 else:
1546 1546 self._status = s
1547 1547
1548 1548 return s
1549 1549
1550 1550 @propertycache
1551 1551 def _manifest(self):
1552 1552 """generate a manifest corresponding to the values in self._status
1553 1553
1554 1554 This reuses the file nodeids from the parent, but we use special node
1555 1555 identifiers for added and modified files. This is used by manifest
1556 1556 merge to see that files are different and by update logic to avoid
1557 1557 deleting newly added files.
1558 1558 """
1559 1559 return self._buildstatusmanifest(self._status)
1560 1560
1561 1561 def _buildstatusmanifest(self, status):
1562 1562 """Builds a manifest that includes the given status results."""
1563 1563 parents = self.parents()
1564 1564
1565 1565 man = parents[0].manifest().copy()
1566 1566
1567 1567 ff = self._flagfunc
1568 1568 for i, l in ((addednodeid, status.added),
1569 1569 (modifiednodeid, status.modified)):
1570 1570 for f in l:
1571 1571 man[f] = i
1572 1572 try:
1573 1573 man.setflag(f, ff(f))
1574 1574 except OSError:
1575 1575 pass
1576 1576
1577 1577 for f in status.deleted + status.removed:
1578 1578 if f in man:
1579 1579 del man[f]
1580 1580
1581 1581 return man
1582 1582
1583 1583 def _buildstatus(self, other, s, match, listignored, listclean,
1584 1584 listunknown):
1585 1585 """build a status with respect to another context
1586 1586
1587 1587 This includes logic for maintaining the fast path of status when
1588 1588 comparing the working directory against its parent, which is to skip
1589 1589 building a new manifest when self (the working directory) is being
1590 1590 compared against its parent (repo['.']).
1591 1591 """
1592 1592 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1593 1593 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1594 1594 # might have accidentally ended up with the entire contents of the file
1595 1595 # they are supposed to be linking to.
1596 1596 s.modified[:] = self._filtersuspectsymlink(s.modified)
1597 1597 if other != self._repo['.']:
1598 1598 s = super(workingctx, self)._buildstatus(other, s, match,
1599 1599 listignored, listclean,
1600 1600 listunknown)
1601 1601 return s
1602 1602
1603 1603 def _matchstatus(self, other, match):
1604 1604 """override the match method with a filter for directory patterns
1605 1605
1606 1606 We use inheritance to customize the match.bad method only in cases of
1607 1607 workingctx since it belongs only to the working directory when
1608 1608 comparing against the parent changeset.
1609 1609
1610 1610 If we aren't comparing against the working directory's parent, then we
1611 1611 just use the default match object sent to us.
1612 1612 """
1613 1613 if other != self._repo['.']:
1614 1614 def bad(f, msg):
1615 1615 # 'f' may be a directory pattern from 'match.files()',
1616 1616 # so 'f not in other' is not enough
1617 1617 if f not in other and not other.hasdir(f):
1618 1618 self._repo.ui.warn('%s: %s\n' %
1619 1619 (self._repo.dirstate.pathto(f), msg))
1620 1620 match.bad = bad
1621 1621 return match
1622 1622
1623 1623 def markcommitted(self, node):
1624 1624 super(workingctx, self).markcommitted(node)
1625 1625
1626 1626 sparse.aftercommit(self._repo, node)
1627 1627
1628 1628 class committablefilectx(basefilectx):
1629 1629 """A committablefilectx provides common functionality for a file context
1630 1630 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1631 1631 def __init__(self, repo, path, filelog=None, ctx=None):
1632 1632 self._repo = repo
1633 1633 self._path = path
1634 1634 self._changeid = None
1635 1635 self._filerev = self._filenode = None
1636 1636
1637 1637 if filelog is not None:
1638 1638 self._filelog = filelog
1639 1639 if ctx:
1640 1640 self._changectx = ctx
1641 1641
1642 1642 def __nonzero__(self):
1643 1643 return True
1644 1644
1645 1645 __bool__ = __nonzero__
1646 1646
1647 1647 def linkrev(self):
1648 1648 # linked to self._changectx no matter if file is modified or not
1649 1649 return self.rev()
1650 1650
1651 1651 def parents(self):
1652 1652 '''return parent filectxs, following copies if necessary'''
1653 1653 def filenode(ctx, path):
1654 1654 return ctx._manifest.get(path, nullid)
1655 1655
1656 1656 path = self._path
1657 1657 fl = self._filelog
1658 1658 pcl = self._changectx._parents
1659 1659 renamed = self.renamed()
1660 1660
1661 1661 if renamed:
1662 1662 pl = [renamed + (None,)]
1663 1663 else:
1664 1664 pl = [(path, filenode(pcl[0], path), fl)]
1665 1665
1666 1666 for pc in pcl[1:]:
1667 1667 pl.append((path, filenode(pc, path), fl))
1668 1668
1669 1669 return [self._parentfilectx(p, fileid=n, filelog=l)
1670 1670 for p, n, l in pl if n != nullid]
1671 1671
1672 1672 def children(self):
1673 1673 return []
1674 1674
1675 1675 class workingfilectx(committablefilectx):
1676 1676 """A workingfilectx object makes access to data related to a particular
1677 1677 file in the working directory convenient."""
1678 1678 def __init__(self, repo, path, filelog=None, workingctx=None):
1679 1679 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1680 1680
1681 1681 @propertycache
1682 1682 def _changectx(self):
1683 1683 return workingctx(self._repo)
1684 1684
1685 1685 def data(self):
1686 1686 return self._repo.wread(self._path)
1687 1687 def renamed(self):
1688 1688 rp = self._repo.dirstate.copied(self._path)
1689 1689 if not rp:
1690 1690 return None
1691 1691 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1692 1692
1693 1693 def size(self):
1694 1694 return self._repo.wvfs.lstat(self._path).st_size
1695 1695 def date(self):
1696 1696 t, tz = self._changectx.date()
1697 1697 try:
1698 1698 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1699 1699 except OSError as err:
1700 1700 if err.errno != errno.ENOENT:
1701 1701 raise
1702 1702 return (t, tz)
1703 1703
1704 1704 def exists(self):
1705 1705 return self._repo.wvfs.exists(self._path)
1706 1706
1707 1707 def lexists(self):
1708 1708 return self._repo.wvfs.lexists(self._path)
1709 1709
1710 1710 def audit(self):
1711 1711 return self._repo.wvfs.audit(self._path)
1712 1712
1713 1713 def cmp(self, fctx):
1714 1714 """compare with other file context
1715 1715
1716 1716 returns True if different than fctx.
1717 1717 """
1718 1718 # fctx should be a filectx (not a workingfilectx)
1719 1719 # invert comparison to reuse the same code path
1720 1720 return fctx.cmp(self)
1721 1721
1722 1722 def remove(self, ignoremissing=False):
1723 1723 """wraps unlink for a repo's working directory"""
1724 1724 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1725 1725 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1726 1726 rmdir=rmdir)
1727 1727
1728 1728 def write(self, data, flags, backgroundclose=False, **kwargs):
1729 1729 """wraps repo.wwrite"""
1730 1730 self._repo.wwrite(self._path, data, flags,
1731 1731 backgroundclose=backgroundclose,
1732 1732 **kwargs)
1733 1733
1734 1734 def markcopied(self, src):
1735 1735 """marks this file a copy of `src`"""
1736 1736 if self._repo.dirstate[self._path] in "nma":
1737 1737 self._repo.dirstate.copy(src, self._path)
1738 1738
1739 1739 def clearunknown(self):
1740 1740 """Removes conflicting items in the working directory so that
1741 1741 ``write()`` can be called successfully.
1742 1742 """
1743 1743 wvfs = self._repo.wvfs
1744 1744 f = self._path
1745 1745 wvfs.audit(f)
1746 1746 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1747 1747 # remove files under the directory as they should already have
1748 1748 # been warned about and backed up
1749 1749 if wvfs.isdir(f) and not wvfs.islink(f):
1750 1750 wvfs.rmtree(f, forcibly=True)
1751 1751 for p in reversed(list(util.finddirs(f))):
1752 1752 if wvfs.isfileorlink(p):
1753 1753 wvfs.unlink(p)
1754 1754 break
1755 1755 else:
1756 1756 # don't remove files if path conflicts are not processed
1757 1757 if wvfs.isdir(f) and not wvfs.islink(f):
1758 1758 wvfs.removedirs(f)
1759 1759
1760 1760 def setflags(self, l, x):
1761 1761 self._repo.wvfs.setflags(self._path, l, x)
1762 1762
1763 1763 class overlayworkingctx(committablectx):
1764 1764 """Wraps another mutable context with a write-back cache that can be
1765 1765 converted into a commit context.
1766 1766
1767 1767 self._cache[path] maps to a dict with keys: {
1768 1768 'exists': bool?
1769 1769 'date': date?
1770 1770 'data': str?
1771 1771 'flags': str?
1772 1772 'copied': str? (path or None)
1773 1773 }
1774 1774 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1775 1775 is `False`, the file was deleted.
1776 1776 """
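# Editor's illustrative sketch (not part of the original source; the path
# and contents are hypothetical): a dirty entry recorded by write() and
# markcopied() could look like
#
#     self._cache['foo/bar.txt'] = {
#         'exists': True,                # file present after the edit
#         'data': 'new contents\n',      # full file content
#         'date': dateutil.makedate(),   # timestamp recorded by write()
#         'flags': 'x',                  # '' or a combination of 'l'/'x'
#         'copied': 'foo/old.txt',       # copy source path, or None
#     }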
1777 1777
1778 1778 def __init__(self, repo):
1779 1779 super(overlayworkingctx, self).__init__(repo)
1780 1780 self.clean()
1781 1781
1782 1782 def setbase(self, wrappedctx):
1783 1783 self._wrappedctx = wrappedctx
1784 1784 self._parents = [wrappedctx]
1785 1785 # Drop old manifest cache as it is now out of date.
1786 1786 # This is necessary when, e.g., rebasing several nodes with one
1787 1787 # ``overlayworkingctx`` (e.g. with --collapse).
1788 1788 util.clearcachedproperty(self, '_manifest')
1789 1789
1790 1790 def data(self, path):
1791 1791 if self.isdirty(path):
1792 1792 if self._cache[path]['exists']:
1793 1793 if self._cache[path]['data']:
1794 1794 return self._cache[path]['data']
1795 1795 else:
1796 1796 # Must fallback here, too, because we only set flags.
1797 1797 return self._wrappedctx[path].data()
1798 1798 else:
1799 1799 raise error.ProgrammingError("No such file or directory: %s" %
1800 1800 path)
1801 1801 else:
1802 1802 return self._wrappedctx[path].data()
1803 1803
1804 1804 @propertycache
1805 1805 def _manifest(self):
1806 1806 parents = self.parents()
1807 1807 man = parents[0].manifest().copy()
1808 1808
1809 1809 flag = self._flagfunc
1810 1810 for path in self.added():
1811 1811 man[path] = addednodeid
1812 1812 man.setflag(path, flag(path))
1813 1813 for path in self.modified():
1814 1814 man[path] = modifiednodeid
1815 1815 man.setflag(path, flag(path))
1816 1816 for path in self.removed():
1817 1817 del man[path]
1818 1818 return man
1819 1819
1820 1820 @propertycache
1821 1821 def _flagfunc(self):
1822 1822 def f(path):
1823 1823 return self._cache[path]['flags']
1824 1824 return f
1825 1825
1826 1826 def files(self):
1827 1827 return sorted(self.added() + self.modified() + self.removed())
1828 1828
1829 1829 def modified(self):
1830 1830 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1831 1831 self._existsinparent(f)]
1832 1832
1833 1833 def added(self):
1834 1834 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1835 1835 not self._existsinparent(f)]
1836 1836
1837 1837 def removed(self):
1838 1838 return [f for f in self._cache.keys() if
1839 1839 not self._cache[f]['exists'] and self._existsinparent(f)]
1840 1840
1841 1841 def isinmemory(self):
1842 1842 return True
1843 1843
1844 1844 def filedate(self, path):
1845 1845 if self.isdirty(path):
1846 1846 return self._cache[path]['date']
1847 1847 else:
1848 1848 return self._wrappedctx[path].date()
1849 1849
1850 1850 def markcopied(self, path, origin):
1851 1851 if self.isdirty(path):
1852 1852 self._cache[path]['copied'] = origin
1853 1853 else:
1854 1854 raise error.ProgrammingError('markcopied() called on clean context')
1855 1855
1856 1856 def copydata(self, path):
1857 1857 if self.isdirty(path):
1858 1858 return self._cache[path]['copied']
1859 1859 else:
1860 1860 raise error.ProgrammingError('copydata() called on clean context')
1861 1861
1862 1862 def flags(self, path):
1863 1863 if self.isdirty(path):
1864 1864 if self._cache[path]['exists']:
1865 1865 return self._cache[path]['flags']
1866 1866 else:
1867 1867 raise error.ProgrammingError("No such file or directory: %s" %
1868 1868 path)
1869 1869 else:
1870 1870 return self._wrappedctx[path].flags()
1871 1871
1872 1872 def _existsinparent(self, path):
1873 1873 try:
1874 1874 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1875 1875 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1876 1876 # with an ``exists()`` function.
1877 1877 self._wrappedctx[path]
1878 1878 return True
1879 1879 except error.ManifestLookupError:
1880 1880 return False
1881 1881
1882 1882 def _auditconflicts(self, path):
1883 1883 """Replicates conflict checks done by wvfs.write().
1884 1884
1885 1885 Since we never write to the filesystem and never call `applyupdates` in
1886 1886 IMM, we'll never check that a path is actually writable -- e.g., because
1887 1887 it adds `a/foo`, but `a` is actually a file in the other commit.
1888 1888 """
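# For example (illustrative only): if p1 tracks a regular file 'a', an
# in-memory write of 'a/foo' coming from p2 must abort here, because
# materializing it later would need 'a' to be a directory; the reverse
# case ('a' written while p1 tracks 'a/foo') is caught by the manifest
# match further below.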
1889 1889 def fail(path, component):
1890 1890 # p1() is the base and we're receiving "writes" for p2()'s
1891 1891 # files.
1892 1892 if 'l' in self.p1()[component].flags():
1893 1893 raise error.Abort("error: %s conflicts with symlink %s "
1894 1894 "in %s." % (path, component,
1895 1895 self.p1().rev()))
1896 1896 else:
1897 1897 raise error.Abort("error: '%s' conflicts with file '%s' in "
1898 1898 "%s." % (path, component,
1899 1899 self.p1().rev()))
1900 1900
1901 1901 # Test that each new directory to be created to write this path from p2
1902 1902 # is not a file in p1.
1903 1903 components = path.split('/')
1904 1904 for i in pycompat.xrange(len(components)):
1905 1905 component = "/".join(components[0:i])
1906 1906 if component in self.p1() and self._cache[component]['exists']:
1907 1907 fail(path, component)
1908 1908
1909 1909 # Test the other direction -- that this path from p2 isn't a directory
1910 1910 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1911 1911 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1912 1912 matches = self.p1().manifest().matches(match)
1913 1913 mfiles = matches.keys()
1914 1914 if len(mfiles) > 0:
1915 1915 if len(mfiles) == 1 and mfiles[0] == path:
1916 1916 return
1917 1917 # omit the files which are deleted in current IMM wctx
1918 1918 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1919 1919 if not mfiles:
1920 1920 return
1921 1921 raise error.Abort("error: file '%s' cannot be written because "
1922 1922 "'%s/' is a folder in %s (containing %d "
1923 1923 "entries: %s)"
1924 1924 % (path, path, self.p1(), len(mfiles),
1925 1925 ', '.join(mfiles)))
1926 1926
1927 1927 def write(self, path, data, flags='', **kwargs):
1928 1928 if data is None:
1929 1929 raise error.ProgrammingError("data must be non-None")
1930 1930 self._auditconflicts(path)
1931 1931 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1932 1932 flags=flags)
1933 1933
1934 1934 def setflags(self, path, l, x):
1935 1935 flag = ''
1936 1936 if l:
1937 1937 flag = 'l'
1938 1938 elif x:
1939 1939 flag = 'x'
1940 1940 self._markdirty(path, exists=True, date=dateutil.makedate(),
1941 1941 flags=flag)
1942 1942
1943 1943 def remove(self, path):
1944 1944 self._markdirty(path, exists=False)
1945 1945
1946 1946 def exists(self, path):
1947 1947 """exists behaves like `lexists`, but needs to follow symlinks and
1948 1948 return False if they are broken.
1949 1949 """
1950 1950 if self.isdirty(path):
1951 1951 # If this path exists and is a symlink, "follow" it by calling
1952 1952 # exists on the destination path.
1953 1953 if (self._cache[path]['exists'] and
1954 1954 'l' in self._cache[path]['flags']):
1955 1955 return self.exists(self._cache[path]['data'].strip())
1956 1956 else:
1957 1957 return self._cache[path]['exists']
1958 1958
1959 1959 return self._existsinparent(path)
1960 1960
1961 1961 def lexists(self, path):
1962 1962 """lexists returns True if the path exists"""
1963 1963 if self.isdirty(path):
1964 1964 return self._cache[path]['exists']
1965 1965
1966 1966 return self._existsinparent(path)
1967 1967
1968 1968 def size(self, path):
1969 1969 if self.isdirty(path):
1970 1970 if self._cache[path]['exists']:
1971 1971 return len(self._cache[path]['data'])
1972 1972 else:
1973 1973 raise error.ProgrammingError("No such file or directory: %s" %
1974 1974 path)
1975 1975 return self._wrappedctx[path].size()
1976 1976
1977 1977 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1978 1978 user=None, editor=None):
1979 1979 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1980 1980 committed.
1981 1981
1982 1982 ``text`` is the commit message.
1983 1983 ``parents`` (optional) are rev numbers.
1984 1984 """
1985 1985 # Default parents to the wrapped contexts' if not passed.
1986 1986 if parents is None:
1987 1987 parents = self._wrappedctx.parents()
1988 1988 if len(parents) == 1:
1989 1989 parents = (parents[0], None)
1990 1990
1991 1991 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1992 1992 if parents[1] is None:
1993 1993 parents = (self._repo[parents[0]], None)
1994 1994 else:
1995 1995 parents = (self._repo[parents[0]], self._repo[parents[1]])
1996 1996
1997 1997 files = self._cache.keys()
1998 1998 def getfile(repo, memctx, path):
1999 1999 if self._cache[path]['exists']:
2000 2000 return memfilectx(repo, memctx, path,
2001 2001 self._cache[path]['data'],
2002 2002 'l' in self._cache[path]['flags'],
2003 2003 'x' in self._cache[path]['flags'],
2004 2004 self._cache[path]['copied'])
2005 2005 else:
2006 2006 # Returning None, but including the path in `files`, is
2007 2007 # necessary for memctx to register a deletion.
2008 2008 return None
2009 2009 return memctx(self._repo, parents, text, files, getfile, date=date,
2010 2010 extra=extra, user=user, branch=branch, editor=editor)
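# Editor's illustrative sketch ('repo' and the literals below are assumed,
# not defined in this module): an in-memory rewrite might funnel its edits
# through this object roughly as follows.
#
#     wctx = overlayworkingctx(repo)
#     wctx.setbase(repo['.'])
#     wctx.write('foo.txt', 'new data\n', flags='')
#     if not wctx.isempty():
#         mctx = wctx.tomemctx('commit message', user='alice')
#         newnode = repo.commitctx(mctx)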
2011 2011
2012 2012 def isdirty(self, path):
2013 2013 return path in self._cache
2014 2014
2015 2015 def isempty(self):
2016 2016 # We need to discard any keys that are actually clean before the empty
2017 2017 # commit check.
2018 2018 self._compact()
2019 2019 return len(self._cache) == 0
2020 2020
2021 2021 def clean(self):
2022 2022 self._cache = {}
2023 2023
2024 2024 def _compact(self):
2025 2025 """Removes keys from the cache that are actually clean, by comparing
2026 2026 them with the underlying context.
2027 2027
2028 2028 This can occur during the merge process, e.g. by passing --tool :local
2029 2029 to resolve a conflict.
2030 2030 """
2031 2031 keys = []
2032 2032 for path in self._cache.keys():
2033 2033 cache = self._cache[path]
2034 2034 try:
2035 2035 underlying = self._wrappedctx[path]
2036 2036 if (underlying.data() == cache['data'] and
2037 2037 underlying.flags() == cache['flags']):
2038 2038 keys.append(path)
2039 2039 except error.ManifestLookupError:
2040 2040 # Path not in the underlying manifest (created).
2041 2041 continue
2042 2042
2043 2043 for path in keys:
2044 2044 del self._cache[path]
2045 2045 return keys
2046 2046
2047 2047 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2048 2048 # data not provided, let's see if we already have some; if not, let's
2049 2049 # grab it from our underlying context, so that we always have data if
2050 2050 # the file is marked as existing.
2051 2051 if exists and data is None:
2052 2052 oldentry = self._cache.get(path) or {}
2053 2053 data = oldentry.get('data') or self._wrappedctx[path].data()
2054 2054
2055 2055 self._cache[path] = {
2056 2056 'exists': exists,
2057 2057 'data': data,
2058 2058 'date': date,
2059 2059 'flags': flags,
2060 2060 'copied': None,
2061 2061 }
2062 2062
2063 2063 def filectx(self, path, filelog=None):
2064 2064 return overlayworkingfilectx(self._repo, path, parent=self,
2065 2065 filelog=filelog)
2066 2066
2067 2067 class overlayworkingfilectx(committablefilectx):
2068 2068 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2069 2069 cache, which can be flushed through later by calling ``flush()``."""
2070 2070
2071 2071 def __init__(self, repo, path, filelog=None, parent=None):
2072 2072 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2073 2073 parent)
2074 2074 self._repo = repo
2075 2075 self._parent = parent
2076 2076 self._path = path
2077 2077
2078 2078 def cmp(self, fctx):
2079 2079 return self.data() != fctx.data()
2080 2080
2081 2081 def changectx(self):
2082 2082 return self._parent
2083 2083
2084 2084 def data(self):
2085 2085 return self._parent.data(self._path)
2086 2086
2087 2087 def date(self):
2088 2088 return self._parent.filedate(self._path)
2089 2089
2090 2090 def exists(self):
2091 2091 return self.lexists()
2092 2092
2093 2093 def lexists(self):
2094 2094 return self._parent.exists(self._path)
2095 2095
2096 2096 def renamed(self):
2097 2097 path = self._parent.copydata(self._path)
2098 2098 if not path:
2099 2099 return None
2100 2100 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2101 2101
2102 2102 def size(self):
2103 2103 return self._parent.size(self._path)
2104 2104
2105 2105 def markcopied(self, origin):
2106 2106 self._parent.markcopied(self._path, origin)
2107 2107
2108 2108 def audit(self):
2109 2109 pass
2110 2110
2111 2111 def flags(self):
2112 2112 return self._parent.flags(self._path)
2113 2113
2114 2114 def setflags(self, islink, isexec):
2115 2115 return self._parent.setflags(self._path, islink, isexec)
2116 2116
2117 2117 def write(self, data, flags, backgroundclose=False, **kwargs):
2118 2118 return self._parent.write(self._path, data, flags, **kwargs)
2119 2119
2120 2120 def remove(self, ignoremissing=False):
2121 2121 return self._parent.remove(self._path)
2122 2122
2123 2123 def clearunknown(self):
2124 2124 pass
2125 2125
2126 2126 class workingcommitctx(workingctx):
2127 2127 """A workingcommitctx object makes access to data related to
2128 2128 the revision being committed convenient.
2129 2129
2130 2130 This hides changes in the working directory that aren't
2131 2131 committed in this context.
2132 2132 """
2133 2133 def __init__(self, repo, changes,
2134 2134 text="", user=None, date=None, extra=None):
2135 super(workingctx, self).__init__(repo, text, user, date, extra,
2136 changes)
2135 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2136 changes)
2137 2137
2138 2138 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2139 2139 """Return matched files only in ``self._status``
2140 2140
2141 2141 Uncommitted files appear "clean" via this context, even if
2142 2142 they aren't actually so in the working directory.
2143 2143 """
2144 2144 if clean:
2145 2145 clean = [f for f in self._manifest if f not in self._changedset]
2146 2146 else:
2147 2147 clean = []
2148 2148 return scmutil.status([f for f in self._status.modified if match(f)],
2149 2149 [f for f in self._status.added if match(f)],
2150 2150 [f for f in self._status.removed if match(f)],
2151 2151 [], [], [], clean)
2152 2152
2153 2153 @propertycache
2154 2154 def _changedset(self):
2155 2155 """Return the set of files changed in this context
2156 2156 """
2157 2157 changed = set(self._status.modified)
2158 2158 changed.update(self._status.added)
2159 2159 changed.update(self._status.removed)
2160 2160 return changed
2161 2161
2162 2162 def makecachingfilectxfn(func):
2163 2163 """Create a filectxfn that caches based on the path.
2164 2164
2165 2165 We can't use util.cachefunc because it uses all arguments as the cache
2166 2166 key and this creates a cycle since the arguments include the repo and
2167 2167 memctx.
2168 2168 """
2169 2169 cache = {}
2170 2170
2171 2171 def getfilectx(repo, memctx, path):
2172 2172 if path not in cache:
2173 2173 cache[path] = func(repo, memctx, path)
2174 2174 return cache[path]
2175 2175
2176 2176 return getfilectx
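# Editor's note (illustrative): memctx below applies this wrapper itself, so
# callers normally pass a plain callback or store. Wrapping by hand, e.g.
#
#     filectxfn = makecachingfilectxfn(memfilefromctx(ctx))
#
# would memoize per-path lookups when building a commit from another,
# hypothetical context 'ctx'.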
2177 2177
2178 2178 def memfilefromctx(ctx):
2179 2179 """Given a context return a memfilectx for ctx[path]
2180 2180
2181 2181 This is a convenience method for building a memctx based on another
2182 2182 context.
2183 2183 """
2184 2184 def getfilectx(repo, memctx, path):
2185 2185 fctx = ctx[path]
2186 2186 # this is weird but apparently we only keep track of one parent
2187 2187 # (why not only store that instead of a tuple?)
2188 2188 copied = fctx.renamed()
2189 2189 if copied:
2190 2190 copied = copied[0]
2191 2191 return memfilectx(repo, memctx, path, fctx.data(),
2192 2192 islink=fctx.islink(), isexec=fctx.isexec(),
2193 2193 copied=copied)
2194 2194
2195 2195 return getfilectx
2196 2196
2197 2197 def memfilefrompatch(patchstore):
2198 2198 """Given a patch (e.g. patchstore object) return a memfilectx
2199 2199
2200 2200 This is a convenience method for building a memctx based on a patchstore.
2201 2201 """
2202 2202 def getfilectx(repo, memctx, path):
2203 2203 data, mode, copied = patchstore.getfile(path)
2204 2204 if data is None:
2205 2205 return None
2206 2206 islink, isexec = mode
2207 2207 return memfilectx(repo, memctx, path, data, islink=islink,
2208 2208 isexec=isexec, copied=copied)
2209 2209
2210 2210 return getfilectx
2211 2211
2212 2212 class memctx(committablectx):
2213 2213 """Use memctx to perform in-memory commits via localrepo.commitctx().
2214 2214
2215 2215 Revision information is supplied at initialization time, while the
2216 2216 related file data is made available through a callback
2217 2217 mechanism. 'repo' is the current localrepo, 'parents' is a
2218 2218 sequence of two parent revision identifiers (pass None for every
2219 2219 missing parent), 'text' is the commit message and 'files' lists
2220 2220 names of files touched by the revision (normalized and relative to
2221 2221 repository root).
2222 2222
2223 2223 filectxfn(repo, memctx, path) is a callable receiving the
2224 2224 repository, the current memctx object and the normalized path of
2225 2225 requested file, relative to repository root. It is fired by the
2226 2226 commit function for every file in 'files', but calls order is
2227 2227 undefined. If the file is available in the revision being
2228 2228 committed (updated or added), filectxfn returns a memfilectx
2229 2229 object. If the file was removed, filectxfn returns None for recent
2230 2230 Mercurial. Moved files are represented by marking the source file
2231 2231 removed and the new file added with copy information (see
2232 2232 memfilectx).
2233 2233
2234 2234 user receives the committer name and defaults to current
2235 2235 repository username, date is the commit date in any format
2236 2236 supported by dateutil.parsedate() and defaults to current date, extra
2237 2237 is a dictionary of metadata or is left empty.
2238 2238 """
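# Editor's illustrative sketch (file names, contents and user are
# hypothetical): a minimal filectxfn honouring the contract described above.
#
#     def filectxfn(repo, memctx, path):
#         if path == 'removed.txt':
#             return None                       # record a removal
#         return memfilectx(repo, memctx, path, 'new contents\n')
#
#     ctx = memctx(repo, [repo['.'].node(), None], 'message',
#                  ['added.txt', 'removed.txt'], filectxfn, user='alice')
#     newnode = repo.commitctx(ctx)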
2239 2239
2240 2240 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2241 2241 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2242 2242 # this field to determine what to do in filectxfn.
2243 2243 _returnnoneformissingfiles = True
2244 2244
2245 2245 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2246 2246 date=None, extra=None, branch=None, editor=False):
2247 2247 super(memctx, self).__init__(repo, text, user, date, extra)
2248 2248 self._rev = None
2249 2249 self._node = None
2250 2250 parents = [(p or nullid) for p in parents]
2251 2251 p1, p2 = parents
2252 2252 self._parents = [self._repo[p] for p in (p1, p2)]
2253 2253 files = sorted(set(files))
2254 2254 self._files = files
2255 2255 if branch is not None:
2256 2256 self._extra['branch'] = encoding.fromlocal(branch)
2257 2257 self.substate = {}
2258 2258
2259 2259 if isinstance(filectxfn, patch.filestore):
2260 2260 filectxfn = memfilefrompatch(filectxfn)
2261 2261 elif not callable(filectxfn):
2262 2262 # if store is not callable, wrap it in a function
2263 2263 filectxfn = memfilefromctx(filectxfn)
2264 2264
2265 2265 # memoizing increases performance for e.g. vcs convert scenarios.
2266 2266 self._filectxfn = makecachingfilectxfn(filectxfn)
2267 2267
2268 2268 if editor:
2269 2269 self._text = editor(self._repo, self, [])
2270 2270 self._repo.savecommitmessage(self._text)
2271 2271
2272 2272 def filectx(self, path, filelog=None):
2273 2273 """get a file context for this in-memory commit
2274 2274
2275 2275 Returns None if file doesn't exist and should be removed."""
2276 2276 return self._filectxfn(self._repo, self, path)
2277 2277
2278 2278 def commit(self):
2279 2279 """commit context to the repo"""
2280 2280 return self._repo.commitctx(self)
2281 2281
2282 2282 @propertycache
2283 2283 def _manifest(self):
2284 2284 """generate a manifest based on the return values of filectxfn"""
2285 2285
2286 2286 # keep this simple for now; just worry about p1
2287 2287 pctx = self._parents[0]
2288 2288 man = pctx.manifest().copy()
2289 2289
2290 2290 for f in self._status.modified:
2291 2291 p1node = nullid
2292 2292 p2node = nullid
2293 2293 p = pctx[f].parents() # if file isn't in pctx, check p2?
2294 2294 if len(p) > 0:
2295 2295 p1node = p[0].filenode()
2296 2296 if len(p) > 1:
2297 2297 p2node = p[1].filenode()
2298 2298 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2299 2299
2300 2300 for f in self._status.added:
2301 2301 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2302 2302
2303 2303 for f in self._status.removed:
2304 2304 if f in man:
2305 2305 del man[f]
2306 2306
2307 2307 return man
2308 2308
2309 2309 @propertycache
2310 2310 def _status(self):
2311 2311 """Calculate exact status from ``files`` specified at construction
2312 2312 """
2313 2313 man1 = self.p1().manifest()
2314 2314 p2 = self._parents[1]
2315 2315 # "1 < len(self._parents)" can't be used for checking
2316 2316 # existence of the 2nd parent, because "memctx._parents" is
2317 2317 # explicitly initialized with a list whose length is 2.
2318 2318 if p2.node() != nullid:
2319 2319 man2 = p2.manifest()
2320 2320 managing = lambda f: f in man1 or f in man2
2321 2321 else:
2322 2322 managing = lambda f: f in man1
2323 2323
2324 2324 modified, added, removed = [], [], []
2325 2325 for f in self._files:
2326 2326 if not managing(f):
2327 2327 added.append(f)
2328 2328 elif self[f]:
2329 2329 modified.append(f)
2330 2330 else:
2331 2331 removed.append(f)
2332 2332
2333 2333 return scmutil.status(modified, added, removed, [], [], [], [])
2334 2334
2335 2335 class memfilectx(committablefilectx):
2336 2336 """memfilectx represents an in-memory file to commit.
2337 2337
2338 2338 See memctx and committablefilectx for more details.
2339 2339 """
2340 2340 def __init__(self, repo, changectx, path, data, islink=False,
2341 2341 isexec=False, copied=None):
2342 2342 """
2343 2343 path is the normalized file path relative to repository root.
2344 2344 data is the file content as a string.
2345 2345 islink is True if the file is a symbolic link.
2346 2346 isexec is True if the file is executable.
2347 2347 copied is the source file path if current file was copied in the
2348 2348 revision being committed, or None."""
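# For instance (hypothetical values): memfilectx(repo, memctx, 'bin/run',
# '#!/bin/sh\n', isexec=True, copied='old/run') describes an executable file
# recorded as copied from 'old/run' in the commit being built.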
2349 2349 super(memfilectx, self).__init__(repo, path, None, changectx)
2350 2350 self._data = data
2351 2351 if islink:
2352 2352 self._flags = 'l'
2353 2353 elif isexec:
2354 2354 self._flags = 'x'
2355 2355 else:
2356 2356 self._flags = ''
2357 2357 self._copied = None
2358 2358 if copied:
2359 2359 self._copied = (copied, nullid)
2360 2360
2361 2361 def data(self):
2362 2362 return self._data
2363 2363
2364 2364 def remove(self, ignoremissing=False):
2365 2365 """wraps unlink for a repo's working directory"""
2366 2366 # need to figure out what to do here
2367 2367 del self._changectx[self._path]
2368 2368
2369 2369 def write(self, data, flags, **kwargs):
2370 2370 """wraps repo.wwrite"""
2371 2371 self._data = data
2372 2372
2373 2373 class overlayfilectx(committablefilectx):
2374 2374 """Like memfilectx but takes an original filectx and optional parameters to
2375 2375 override parts of it. This is useful when fctx.data() is expensive (i.e.
2376 2376 the flag processor is expensive) and raw data, flags, and filenode could be
2377 2377 reused (e.g. a rebase or a mode-only amend of a REVIDX_EXTSTORED file).
2378 2378 """
2379 2379
2380 2380 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2381 2381 copied=None, ctx=None):
2382 2382 """originalfctx: filecontext to duplicate
2383 2383
2384 2384 datafunc: None or a function to override data (file content). It is a
2385 2385 function so that it can be lazy. path, flags, copied, ctx: None or overridden value
2386 2386
2387 2387 copied could be (path, rev), or False. copied could also be just path,
2388 2388 and will be converted to (path, nullid). This simplifies some callers.
2389 2389 """
2390 2390
2391 2391 if path is None:
2392 2392 path = originalfctx.path()
2393 2393 if ctx is None:
2394 2394 ctx = originalfctx.changectx()
2395 2395 ctxmatch = lambda: True
2396 2396 else:
2397 2397 ctxmatch = lambda: ctx == originalfctx.changectx()
2398 2398
2399 2399 repo = originalfctx.repo()
2400 2400 flog = originalfctx.filelog()
2401 2401 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2402 2402
2403 2403 if copied is None:
2404 2404 copied = originalfctx.renamed()
2405 2405 copiedmatch = lambda: True
2406 2406 else:
2407 2407 if copied and not isinstance(copied, tuple):
2408 2408 # repo._filecommit will recalculate copyrev so nullid is okay
2409 2409 copied = (copied, nullid)
2410 2410 copiedmatch = lambda: copied == originalfctx.renamed()
2411 2411
2412 2412 # When data, copied (could affect data), ctx (could affect filelog
2413 2413 # parents) are not overridden, rawdata, rawflags, and filenode may be
2414 2414 # reused (repo._filecommit should double check filelog parents).
2415 2415 #
2416 2416 # path, flags are not hashed in filelog (but in manifestlog) so they do
2417 2417 # not affect reusable here.
2418 2418 #
2419 2419 # If ctx or copied is overridden to a same value with originalfctx,
2420 2420 # still consider it's reusable. originalfctx.renamed() may be a bit
2421 2421 # expensive so it's not called unless necessary. Assuming datafunc is
2422 2422 # always expensive, do not call it for this "reusable" test.
2423 2423 reusable = datafunc is None and ctxmatch() and copiedmatch()
2424 2424
2425 2425 if datafunc is None:
2426 2426 datafunc = originalfctx.data
2427 2427 if flags is None:
2428 2428 flags = originalfctx.flags()
2429 2429
2430 2430 self._datafunc = datafunc
2431 2431 self._flags = flags
2432 2432 self._copied = copied
2433 2433
2434 2434 if reusable:
2435 2435 # copy extra fields from originalfctx
2436 2436 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2437 2437 for attr_ in attrs:
2438 2438 if util.safehasattr(originalfctx, attr_):
2439 2439 setattr(self, attr_, getattr(originalfctx, attr_))
2440 2440
2441 2441 def data(self):
2442 2442 return self._datafunc()
2443 2443
2444 2444 class metadataonlyctx(committablectx):
2445 2445 """Like memctx but it reuses the manifest of a different commit.
2446 2446 Intended to be used by lightweight operations that are creating
2447 2447 metadata-only changes.
2448 2448
2449 2449 Revision information is supplied at initialization time. 'repo' is the
2450 2450 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2451 2451 'parents' is a sequence of two parent revision identifiers (pass None for
2452 2452 every missing parent), 'text' is the commit message.
2453 2453
2454 2454 user receives the committer name and defaults to current repository
2455 2455 username, date is the commit date in any format supported by
2456 2456 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2457 2457 metadata or is left empty.
2458 2458 """
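# Editor's illustrative sketch (the message and user are hypothetical):
# rewriting only the metadata of the working directory's parent, while
# reusing its manifest unchanged, could look like
#
#     newctx = metadataonlyctx(repo, repo['.'], text='reworded message',
#                              user='alice')
#     newnode = newctx.commit()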
2459 2459 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2460 2460 date=None, extra=None, editor=False):
2461 2461 if text is None:
2462 2462 text = originalctx.description()
2463 2463 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2464 2464 self._rev = None
2465 2465 self._node = None
2466 2466 self._originalctx = originalctx
2467 2467 self._manifestnode = originalctx.manifestnode()
2468 2468 if parents is None:
2469 2469 parents = originalctx.parents()
2470 2470 else:
2471 2471 parents = [repo[p] for p in parents if p is not None]
2472 2472 parents = parents[:]
2473 2473 while len(parents) < 2:
2474 2474 parents.append(repo[nullid])
2475 2475 p1, p2 = self._parents = parents
2476 2476
2477 2477 # sanity check to ensure that the reused manifest parents are
2478 2478 # manifests of our commit parents
2479 2479 mp1, mp2 = self.manifestctx().parents
2480 2480 if p1 != nullid and p1.manifestnode() != mp1:
2481 2481 raise RuntimeError('can\'t reuse the manifest: '
2482 2482 'its p1 doesn\'t match the new ctx p1')
2483 2483 if p2 != nullid and p2.manifestnode() != mp2:
2484 2484 raise RuntimeError('can\'t reuse the manifest: '
2485 2485 'its p2 doesn\'t match the new ctx p2')
2486 2486
2487 2487 self._files = originalctx.files()
2488 2488 self.substate = {}
2489 2489
2490 2490 if editor:
2491 2491 self._text = editor(self._repo, self, [])
2492 2492 self._repo.savecommitmessage(self._text)
2493 2493
2494 2494 def manifestnode(self):
2495 2495 return self._manifestnode
2496 2496
2497 2497 @property
2498 2498 def _manifestctx(self):
2499 2499 return self._repo.manifestlog[self._manifestnode]
2500 2500
2501 2501 def filectx(self, path, filelog=None):
2502 2502 return self._originalctx.filectx(path, filelog=filelog)
2503 2503
2504 2504 def commit(self):
2505 2505 """commit context to the repo"""
2506 2506 return self._repo.commitctx(self)
2507 2507
2508 2508 @property
2509 2509 def _manifest(self):
2510 2510 return self._originalctx.manifest()
2511 2511
2512 2512 @propertycache
2513 2513 def _status(self):
2514 2514 """Calculate exact status from ``files`` specified in the ``originalctx``
2515 2515 and the parents' manifests.
2516 2516 """
2517 2517 man1 = self.p1().manifest()
2518 2518 p2 = self._parents[1]
2519 2519 # "1 < len(self._parents)" can't be used for checking
2520 2520 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2521 2521 # explicitly initialized with a list whose length is 2.
2522 2522 if p2.node() != nullid:
2523 2523 man2 = p2.manifest()
2524 2524 managing = lambda f: f in man1 or f in man2
2525 2525 else:
2526 2526 managing = lambda f: f in man1
2527 2527
2528 2528 modified, added, removed = [], [], []
2529 2529 for f in self._files:
2530 2530 if not managing(f):
2531 2531 added.append(f)
2532 2532 elif f in self:
2533 2533 modified.append(f)
2534 2534 else:
2535 2535 removed.append(f)
2536 2536
2537 2537 return scmutil.status(modified, added, removed, [], [], [], [])
2538 2538
2539 2539 class arbitraryfilectx(object):
2540 2540 """Allows you to use filectx-like functions on a file in an arbitrary
2541 2541 location on disk, possibly not in the working directory.
2542 2542 """
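# For example (illustrative only, paths are hypothetical): during a merge an
# on-disk backup can be wrapped as arbitraryfilectx('/tmp/backup.orig',
# repo=repo) and compared against a working directory file with
# .cmp(repo[None]['foo.txt']), taking the filecmp fast path below when
# neither side is a symlink.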
2543 2543 def __init__(self, path, repo=None):
2544 2544 # Repo is optional because contrib/simplemerge uses this class.
2545 2545 self._repo = repo
2546 2546 self._path = path
2547 2547
2548 2548 def cmp(self, fctx):
2549 2549 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2550 2550 # path if either side is a symlink.
2551 2551 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2552 2552 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2553 2553 # Add a fast-path for merge if both sides are disk-backed.
2554 2554 # Note that filecmp uses the opposite return values (True if same)
2555 2555 # from our cmp functions (True if different).
2556 2556 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2557 2557 return self.data() != fctx.data()
2558 2558
2559 2559 def path(self):
2560 2560 return self._path
2561 2561
2562 2562 def flags(self):
2563 2563 return ''
2564 2564
2565 2565 def data(self):
2566 2566 return util.readfile(self._path)
2567 2567
2568 2568 def decodeddata(self):
2569 2569 with open(self._path, "rb") as f:
2570 2570 return f.read()
2571 2571
2572 2572 def remove(self):
2573 2573 util.unlink(self._path)
2574 2574
2575 2575 def write(self, data, flags, **kwargs):
2576 2576 assert not flags
2577 2577 with open(self._path, "w") as f:
2578 2578 f.write(data)