context: drop support for looking up context by ambiguous changeid (API)...
Martin von Zweigbergk
r37871:8b86acc7 default
@@ -1,2600 +1,2541 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirfilenodeids,
26 26 wdirid,
27 wdirrev,
28 27 )
29 28 from . import (
30 29 dagop,
31 30 encoding,
32 31 error,
33 32 fileset,
34 33 match as matchmod,
35 34 obsolete as obsmod,
36 35 patch,
37 36 pathutil,
38 37 phases,
39 38 pycompat,
40 39 repoview,
41 40 revlog,
42 41 scmutil,
43 42 sparse,
44 43 subrepo,
45 44 subrepoutil,
46 45 util,
47 46 )
48 47 from .utils import (
49 48 dateutil,
50 49 stringutil,
51 50 )
52 51
53 52 propertycache = util.propertycache
54 53
55 54 nonascii = re.compile(br'[^\x21-\x7f]').search
56 55
57 56 class basectx(object):
58 57 """A basectx object represents the common logic for its children:
59 58 changectx: read-only context that is already present in the repo,
60 59 workingctx: a context that represents the working directory and can
61 60 be committed,
62 61 memctx: a context that represents changes in-memory and can also
63 62 be committed."""
64 63
65 64 def __init__(self, repo):
66 65 self._repo = repo
67 66
68 67 def __bytes__(self):
69 68 return short(self.node())
70 69
71 70 __str__ = encoding.strmethod(__bytes__)
72 71
73 72 def __repr__(self):
74 73 return r"<%s %s>" % (type(self).__name__, str(self))
75 74
76 75 def __eq__(self, other):
77 76 try:
78 77 return type(self) == type(other) and self._rev == other._rev
79 78 except AttributeError:
80 79 return False
81 80
82 81 def __ne__(self, other):
83 82 return not (self == other)
84 83
85 84 def __contains__(self, key):
86 85 return key in self._manifest
87 86
88 87 def __getitem__(self, key):
89 88 return self.filectx(key)
90 89
91 90 def __iter__(self):
92 91 return iter(self._manifest)
93 92
94 93 def _buildstatusmanifest(self, status):
95 94 """Builds a manifest that includes the given status results, if this is
96 95 a working copy context. For non-working copy contexts, it just returns
97 96 the normal manifest."""
98 97 return self.manifest()
99 98
100 99 def _matchstatus(self, other, match):
101 100 """This internal method provides a way for child objects to override the
102 101 match operator.
103 102 """
104 103 return match
105 104
106 105 def _buildstatus(self, other, s, match, listignored, listclean,
107 106 listunknown):
108 107 """build a status with respect to another context"""
109 108 # Load earliest manifest first for caching reasons. More specifically,
110 109 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 110 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 111 # 1000 and cache it so that when you read 1001, we just need to apply a
113 112 # delta to what's in the cache. So that's one full reconstruction + one
114 113 # delta application.
115 114 mf2 = None
116 115 if self.rev() is not None and self.rev() < other.rev():
117 116 mf2 = self._buildstatusmanifest(s)
118 117 mf1 = other._buildstatusmanifest(s)
119 118 if mf2 is None:
120 119 mf2 = self._buildstatusmanifest(s)
121 120
122 121 modified, added = [], []
123 122 removed = []
124 123 clean = []
125 124 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 125 deletedset = set(deleted)
127 126 d = mf1.diff(mf2, match=match, clean=listclean)
128 127 for fn, value in d.iteritems():
129 128 if fn in deletedset:
130 129 continue
131 130 if value is None:
132 131 clean.append(fn)
133 132 continue
134 133 (node1, flag1), (node2, flag2) = value
135 134 if node1 is None:
136 135 added.append(fn)
137 136 elif node2 is None:
138 137 removed.append(fn)
139 138 elif flag1 != flag2:
140 139 modified.append(fn)
141 140 elif node2 not in wdirfilenodeids:
142 141 # When comparing files between two commits, we save time by
143 142 # not comparing the file contents when the nodeids differ.
144 143 # Note that this means we incorrectly report a reverted change
145 144 # to a file as a modification.
146 145 modified.append(fn)
147 146 elif self[fn].cmp(other[fn]):
148 147 modified.append(fn)
149 148 else:
150 149 clean.append(fn)
151 150
152 151 if removed:
153 152 # need to filter files if they are already reported as removed
154 153 unknown = [fn for fn in unknown if fn not in mf1 and
155 154 (not match or match(fn))]
156 155 ignored = [fn for fn in ignored if fn not in mf1 and
157 156 (not match or match(fn))]
158 157 # if they're deleted, don't report them as removed
159 158 removed = [fn for fn in removed if fn not in deletedset]
160 159
161 160 return scmutil.status(modified, added, removed, deleted, unknown,
162 161 ignored, clean)
163 162
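For reference, a hedged sketch of the mapping shape that mf1.diff() hands to the loop above; the file names and node values here are purely illustrative:

    # d maps filename -> None for clean files, or a pair of (node, flags)
    # tuples where a None node means the file is absent on that side.
    d = {
        'added.txt':   ((None, ''), ('\x01' * 20, '')),
        'removed.txt': (('\x02' * 20, ''), (None, '')),
        'clean.txt':   None,          # only present when clean=True
    }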
164 163 @propertycache
165 164 def substate(self):
166 165 return subrepoutil.state(self, self._repo.ui)
167 166
168 167 def subrev(self, subpath):
169 168 return self.substate[subpath][1]
170 169
171 170 def rev(self):
172 171 return self._rev
173 172 def node(self):
174 173 return self._node
175 174 def hex(self):
176 175 return hex(self.node())
177 176 def manifest(self):
178 177 return self._manifest
179 178 def manifestctx(self):
180 179 return self._manifestctx
181 180 def repo(self):
182 181 return self._repo
183 182 def phasestr(self):
184 183 return phases.phasenames[self.phase()]
185 184 def mutable(self):
186 185 return self.phase() > phases.public
187 186
188 187 def getfileset(self, expr):
189 188 return fileset.getfileset(self, expr)
190 189
191 190 def obsolete(self):
192 191 """True if the changeset is obsolete"""
193 192 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 193
195 194 def extinct(self):
196 195 """True if the changeset is extinct"""
197 196 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 197
199 198 def orphan(self):
200 199 """True if the changeset is not obsolete but it's ancestor are"""
201 200 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
202 201
203 202 def phasedivergent(self):
204 203 """True if the changeset try to be a successor of a public changeset
205 204
206 205 Only non-public and non-obsolete changesets may be bumped.
207 206 """
208 207 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
209 208
210 209 def contentdivergent(self):
211 210 """Is a successors of a changeset with multiple possible successors set
212 211
213 212 Only non-public and non-obsolete changesets may be divergent.
214 213 """
215 214 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
216 215
217 216 def isunstable(self):
218 217 """True if the changeset is either unstable, bumped or divergent"""
219 218 return self.orphan() or self.phasedivergent() or self.contentdivergent()
220 219
221 220 def instabilities(self):
222 221 """return the list of instabilities affecting this changeset.
223 222
224 223 Instabilities are returned as strings. possible values are:
225 224 - orphan,
226 225 - phase-divergent,
227 226 - content-divergent.
228 227 """
229 228 instabilities = []
230 229 if self.orphan():
231 230 instabilities.append('orphan')
232 231 if self.phasedivergent():
233 232 instabilities.append('phase-divergent')
234 233 if self.contentdivergent():
235 234 instabilities.append('content-divergent')
236 235 return instabilities
237 236
238 237 def parents(self):
239 238 """return contexts for each parent changeset"""
240 239 return self._parents
241 240
242 241 def p1(self):
243 242 return self._parents[0]
244 243
245 244 def p2(self):
246 245 parents = self._parents
247 246 if len(parents) == 2:
248 247 return parents[1]
249 248 return changectx(self._repo, nullrev)
250 249
251 250 def _fileinfo(self, path):
252 251 if r'_manifest' in self.__dict__:
253 252 try:
254 253 return self._manifest[path], self._manifest.flags(path)
255 254 except KeyError:
256 255 raise error.ManifestLookupError(self._node, path,
257 256 _('not found in manifest'))
258 257 if r'_manifestdelta' in self.__dict__ or path in self.files():
259 258 if path in self._manifestdelta:
260 259 return (self._manifestdelta[path],
261 260 self._manifestdelta.flags(path))
262 261 mfl = self._repo.manifestlog
263 262 try:
264 263 node, flag = mfl[self._changeset.manifest].find(path)
265 264 except KeyError:
266 265 raise error.ManifestLookupError(self._node, path,
267 266 _('not found in manifest'))
268 267
269 268 return node, flag
270 269
271 270 def filenode(self, path):
272 271 return self._fileinfo(path)[0]
273 272
274 273 def flags(self, path):
275 274 try:
276 275 return self._fileinfo(path)[1]
277 276 except error.LookupError:
278 277 return ''
279 278
280 279 def sub(self, path, allowcreate=True):
281 280 '''return a subrepo for the stored revision of path, never wdir()'''
282 281 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283 282
284 283 def nullsub(self, path, pctx):
285 284 return subrepo.nullsubrepo(self, path, pctx)
286 285
287 286 def workingsub(self, path):
288 287 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 288 context.
290 289 '''
291 290 return subrepo.subrepo(self, path, allowwdir=True)
292 291
293 292 def match(self, pats=None, include=None, exclude=None, default='glob',
294 293 listsubrepos=False, badfn=None):
295 294 r = self._repo
296 295 return matchmod.match(r.root, r.getcwd(), pats,
297 296 include, exclude, default,
298 297 auditor=r.nofsauditor, ctx=self,
299 298 listsubrepos=listsubrepos, badfn=badfn)
300 299
301 300 def diff(self, ctx2=None, match=None, **opts):
302 301 """Returns a diff generator for the given contexts and matcher"""
303 302 if ctx2 is None:
304 303 ctx2 = self.p1()
305 304 if ctx2 is not None:
306 305 ctx2 = self._repo[ctx2]
307 306 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
308 307 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309 308
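As a usage illustration (not part of the change), the generator returned by diff() yields chunks of patch text; a minimal sketch, assuming repo is an opened repository:

    ctx = repo['tip']
    # diff of tip against its first parent, written out chunk by chunk
    for chunk in ctx.diff(git=True):
        repo.ui.write(chunk)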
310 309 def dirs(self):
311 310 return self._manifest.dirs()
312 311
313 312 def hasdir(self, dir):
314 313 return self._manifest.hasdir(dir)
315 314
316 315 def status(self, other=None, match=None, listignored=False,
317 316 listclean=False, listunknown=False, listsubrepos=False):
318 317 """return status of files between two nodes or node and working
319 318 directory.
320 319
321 320 If other is None, compare this node with working directory.
322 321
323 322 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 323 """
325 324
326 325 ctx1 = self
327 326 ctx2 = self._repo[other]
328 327
329 328 # This next code block is, admittedly, fragile logic that tests for
330 329 # reversing the contexts and wouldn't need to exist if it weren't for
331 330 # the fast (and common) code path of comparing the working directory
332 331 # with its first parent.
333 332 #
334 333 # What we're aiming for here is the ability to call:
335 334 #
336 335 # workingctx.status(parentctx)
337 336 #
338 337 # If we always built the manifest for each context and compared those,
339 338 # then we'd be done. But the special case of the above call means we
340 339 # just copy the manifest of the parent.
341 340 reversed = False
342 341 if (not isinstance(ctx1, changectx)
343 342 and isinstance(ctx2, changectx)):
344 343 reversed = True
345 344 ctx1, ctx2 = ctx2, ctx1
346 345
347 346 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 347 match = ctx2._matchstatus(ctx1, match)
349 348 r = scmutil.status([], [], [], [], [], [], [])
350 349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 350 listunknown)
352 351
353 352 if reversed:
354 353 # Reverse added and removed. Clear deleted, unknown and ignored as
355 354 # these make no sense to reverse.
356 355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 356 r.clean)
358 357
359 358 if listsubrepos:
360 359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 360 try:
362 361 rev2 = ctx2.subrev(subpath)
363 362 except KeyError:
364 363 # A subrepo that existed in node1 was deleted between
365 364 # node1 and node2 (inclusive). Thus, ctx2's substate
366 365 # won't contain that subpath. The best we can do is ignore it.
367 366 rev2 = None
368 367 submatch = matchmod.subdirmatcher(subpath, match)
369 368 s = sub.status(rev2, match=submatch, ignored=listignored,
370 369 clean=listclean, unknown=listunknown,
371 370 listsubrepos=True)
372 371 for rfiles, sfiles in zip(r, s):
373 372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 373
375 374 for l in r:
376 375 l.sort()
377 376
378 377 return r
379 378
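A short, hedged example of the resulting API, assuming an opened repo; the returned object is the scmutil.status tuple built above:

    st = repo['.'].status()            # working directory vs. its parent
    for f in st.modified:
        repo.ui.write("M %s\n" % f)
    for f in st.added:
        repo.ui.write("A %s\n" % f)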
380 def changectxdeprecwarn(repo):
381 # changectx's constructor will soon lose support for these forms of
382 # changeids:
383 # * stringified ints
384 # * bookmarks, tags, branches, and other namespace identifiers
385 # * hex nodeid prefixes
386 #
387 # Depending on your use case, replace repo[x] by one of these:
388 # * If you want to support general revsets, use scmutil.revsingle(x)
389 # * If you know that "x" is a stringified int, use repo[int(x)]
390 # * If you know that "x" is a bookmark, use repo._bookmarks.changectx(x)
391 # * If you know that "x" is a tag, use repo[repo.tags()[x]]
392 # * If you know that "x" is a branch or in some other namespace,
393 # use the appropriate mechanism for that namespace
394 # * If you know that "x" is a hex nodeid prefix, use
395 # repo[scmutil.resolvehexnodeidprefix(repo, x)]
396 # * If "x" is a string that can be any of the above, but you don't want
397 # to allow general revsets (perhaps because "x" may come from a remote
398 # user and the revset may be too costly), use scmutil.revsymbol(repo, x)
399 # * If "x" can be a mix of the above, you'll have to figure it out
400 # yourself
401 repo.ui.deprecwarn("changectx.__init__ is getting more limited, see "
402 "context.changectxdeprecwarn() for details", "4.6",
403 stacklevel=4)
404
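The removed helper above spelled out the migration recipes for callers of repo[x]. A minimal sketch of the replacement pattern, assuming an opened repo and a user-supplied symbol x (the helper name is illustrative, not part of the change):

    from mercurial import scmutil

    def lookupctx(repo, x):
        # Integer revision numbers, 20-byte nodes, 40-char hex nodes and
        # the keywords '.', 'tip' and 'null' still go straight through
        # the changectx constructor.
        if isinstance(x, int):
            return repo[x]
        # Bookmarks, tags, branches, hex prefixes and stringified ints
        # are resolved explicitly, without allowing arbitrary revsets.
        return scmutil.revsymbol(repo, x)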
405 379 class changectx(basectx):
406 380 """A changecontext object makes access to data related to a particular
407 381 changeset convenient. It represents a read-only context already present in
408 382 the repo."""
409 383 def __init__(self, repo, changeid='.'):
410 384 """changeid is a revision number, node, or tag"""
411 385 super(changectx, self).__init__(repo)
412 386
413 387 try:
414 388 if isinstance(changeid, int):
415 389 self._node = repo.changelog.node(changeid)
416 390 self._rev = changeid
417 391 return
418 392 if changeid == 'null':
419 393 self._node = nullid
420 394 self._rev = nullrev
421 395 return
422 396 if changeid == 'tip':
423 397 self._node = repo.changelog.tip()
424 398 self._rev = repo.changelog.rev(self._node)
425 399 return
426 400 if (changeid == '.'
427 401 or repo.local() and changeid == repo.dirstate.p1()):
428 402 # this is a hack to delay/avoid loading obsmarkers
429 403 # when we know that '.' won't be hidden
430 404 self._node = repo.dirstate.p1()
431 405 self._rev = repo.unfiltered().changelog.rev(self._node)
432 406 return
433 407 if len(changeid) == 20:
434 408 try:
435 409 self._node = changeid
436 410 self._rev = repo.changelog.rev(changeid)
437 411 return
438 412 except error.FilteredLookupError:
439 413 raise
440 414 except LookupError:
441 415 pass
442 416
443 try:
444 r = int(changeid)
445 if '%d' % r != changeid:
446 raise ValueError
447 l = len(repo.changelog)
448 if r < 0:
449 r += l
450 if r < 0 or r >= l and r != wdirrev:
451 raise ValueError
452 self._rev = r
453 self._node = repo.changelog.node(r)
454 changectxdeprecwarn(repo)
455 return
456 except error.FilteredIndexError:
457 raise
458 except (ValueError, OverflowError, IndexError):
459 pass
460
461 417 if len(changeid) == 40:
462 418 try:
463 419 self._node = bin(changeid)
464 420 self._rev = repo.changelog.rev(self._node)
465 421 return
466 422 except error.FilteredLookupError:
467 423 raise
468 424 except (TypeError, LookupError):
469 425 pass
470 426
471 # lookup bookmarks through the name interface
472 try:
473 self._node = repo.names.singlenode(repo, changeid)
474 self._rev = repo.changelog.rev(self._node)
475 changectxdeprecwarn(repo)
476 return
477 except KeyError:
478 pass
479
480 self._node = scmutil.resolvehexnodeidprefix(repo, changeid)
481 if self._node is not None:
482 self._rev = repo.changelog.rev(self._node)
483 changectxdeprecwarn(repo)
484 return
485
486 427 # lookup failed
487 428 # check if it might have come from damaged dirstate
488 429 #
489 430 # XXX we could avoid the unfiltered if we had a recognizable
490 431 # exception for filtered changeset access
491 432 if (repo.local()
492 433 and changeid in repo.unfiltered().dirstate.parents()):
493 434 msg = _("working directory has unknown parent '%s'!")
494 435 raise error.Abort(msg % short(changeid))
495 436 try:
496 437 if len(changeid) == 20 and nonascii(changeid):
497 438 changeid = hex(changeid)
498 439 except TypeError:
499 440 pass
500 441 except (error.FilteredIndexError, error.FilteredLookupError):
501 442 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
502 443 % changeid)
503 444 except error.FilteredRepoLookupError:
504 445 raise
505 446 except IndexError:
506 447 pass
507 448 raise error.RepoLookupError(
508 449 _("unknown revision '%s'") % changeid)
509 450
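Concretely, the lookups kept by the constructor above boil down to the following hedged illustration (repo is a placeholder for an opened repository, and the node values are invented):

    repo[0]              # integer revision number
    repo['null']         # the null revision
    repo['tip']          # the repository tip
    repo['.']            # the working directory parent
    repo['\x12' * 20]    # a full 20-byte binary node, if present
    repo['a' * 40]       # a full 40-character hex nodeid, if such a node exists
    # bookmarks, tags, hex prefixes and stringified ints now need
    # scmutil.revsymbol()/revsingle() or an explicit conversion first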
510 451 def __hash__(self):
511 452 try:
512 453 return hash(self._rev)
513 454 except AttributeError:
514 455 return id(self)
515 456
516 457 def __nonzero__(self):
517 458 return self._rev != nullrev
518 459
519 460 __bool__ = __nonzero__
520 461
521 462 @propertycache
522 463 def _changeset(self):
523 464 return self._repo.changelog.changelogrevision(self.rev())
524 465
525 466 @propertycache
526 467 def _manifest(self):
527 468 return self._manifestctx.read()
528 469
529 470 @property
530 471 def _manifestctx(self):
531 472 return self._repo.manifestlog[self._changeset.manifest]
532 473
533 474 @propertycache
534 475 def _manifestdelta(self):
535 476 return self._manifestctx.readdelta()
536 477
537 478 @propertycache
538 479 def _parents(self):
539 480 repo = self._repo
540 481 p1, p2 = repo.changelog.parentrevs(self._rev)
541 482 if p2 == nullrev:
542 483 return [changectx(repo, p1)]
543 484 return [changectx(repo, p1), changectx(repo, p2)]
544 485
545 486 def changeset(self):
546 487 c = self._changeset
547 488 return (
548 489 c.manifest,
549 490 c.user,
550 491 c.date,
551 492 c.files,
552 493 c.description,
553 494 c.extra,
554 495 )
555 496 def manifestnode(self):
556 497 return self._changeset.manifest
557 498
558 499 def user(self):
559 500 return self._changeset.user
560 501 def date(self):
561 502 return self._changeset.date
562 503 def files(self):
563 504 return self._changeset.files
564 505 def description(self):
565 506 return self._changeset.description
566 507 def branch(self):
567 508 return encoding.tolocal(self._changeset.extra.get("branch"))
568 509 def closesbranch(self):
569 510 return 'close' in self._changeset.extra
570 511 def extra(self):
571 512 """Return a dict of extra information."""
572 513 return self._changeset.extra
573 514 def tags(self):
574 515 """Return a list of byte tag names"""
575 516 return self._repo.nodetags(self._node)
576 517 def bookmarks(self):
577 518 """Return a list of byte bookmark names."""
578 519 return self._repo.nodebookmarks(self._node)
579 520 def phase(self):
580 521 return self._repo._phasecache.phase(self._repo, self._rev)
581 522 def hidden(self):
582 523 return self._rev in repoview.filterrevs(self._repo, 'visible')
583 524
584 525 def isinmemory(self):
585 526 return False
586 527
587 528 def children(self):
588 529 """return list of changectx contexts for each child changeset.
589 530
590 531 This returns only the immediate child changesets. Use descendants() to
591 532 recursively walk children.
592 533 """
593 534 c = self._repo.changelog.children(self._node)
594 535 return [changectx(self._repo, x) for x in c]
595 536
596 537 def ancestors(self):
597 538 for a in self._repo.changelog.ancestors([self._rev]):
598 539 yield changectx(self._repo, a)
599 540
600 541 def descendants(self):
601 542 """Recursively yield all children of the changeset.
602 543
603 544 For just the immediate children, use children()
604 545 """
605 546 for d in self._repo.changelog.descendants([self._rev]):
606 547 yield changectx(self._repo, d)
607 548
608 549 def filectx(self, path, fileid=None, filelog=None):
609 550 """get a file context from this changeset"""
610 551 if fileid is None:
611 552 fileid = self.filenode(path)
612 553 return filectx(self._repo, path, fileid=fileid,
613 554 changectx=self, filelog=filelog)
614 555
615 556 def ancestor(self, c2, warn=False):
616 557 """return the "best" ancestor context of self and c2
617 558
618 559 If there are multiple candidates, it will show a message and check
619 560 merge.preferancestor configuration before falling back to the
620 561 revlog ancestor."""
621 562 # deal with workingctxs
622 563 n2 = c2._node
623 564 if n2 is None:
624 565 n2 = c2._parents[0]._node
625 566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
626 567 if not cahs:
627 568 anc = nullid
628 569 elif len(cahs) == 1:
629 570 anc = cahs[0]
630 571 else:
631 572 # experimental config: merge.preferancestor
632 573 for r in self._repo.ui.configlist('merge', 'preferancestor'):
633 574 try:
634 575 ctx = scmutil.revsymbol(self._repo, r)
635 576 except error.RepoLookupError:
636 577 continue
637 578 anc = ctx.node()
638 579 if anc in cahs:
639 580 break
640 581 else:
641 582 anc = self._repo.changelog.ancestor(self._node, n2)
642 583 if warn:
643 584 self._repo.ui.status(
644 585 (_("note: using %s as ancestor of %s and %s\n") %
645 586 (short(anc), short(self._node), short(n2))) +
646 587 ''.join(_(" alternatively, use --config "
647 588 "merge.preferancestor=%s\n") %
648 589 short(n) for n in sorted(cahs) if n != anc))
649 590 return changectx(self._repo, anc)
650 591
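When several common-ancestor heads exist, the note printed above points at the experimental merge.preferancestor knob; a hedged configuration sketch (the node value is a placeholder):

    [merge]
    # pick this node when multiple "best" ancestors are found
    preferancestor = d047485b3896

The same value can also be passed one-off via --config merge.preferancestor=<node>, as the hint in the message suggests.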
651 592 def descendant(self, other):
652 593 """True if other is descendant of this changeset"""
653 594 return self._repo.changelog.descendant(self._rev, other._rev)
654 595
655 596 def walk(self, match):
656 597 '''Generates matching file names.'''
657 598
658 599 # Wrap match.bad method to have message with nodeid
659 600 def bad(fn, msg):
660 601 # The manifest doesn't know about subrepos, so don't complain about
661 602 # paths into valid subrepos.
662 603 if any(fn == s or fn.startswith(s + '/')
663 604 for s in self.substate):
664 605 return
665 606 match.bad(fn, _('no such file in rev %s') % self)
666 607
667 608 m = matchmod.badmatch(match, bad)
668 609 return self._manifest.walk(m)
669 610
670 611 def matches(self, match):
671 612 return self.walk(match)
672 613
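A brief, hedged usage sketch of the matcher plumbing above, assuming an opened repo (the pattern is arbitrary):

    ctx = repo['tip']
    m = ctx.match(['glob:**.py'])      # matcher scoped to this context
    for f in ctx.walk(m):              # file names tracked in 'tip'
        repo.ui.write("%s\n" % f)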
673 614 class basefilectx(object):
674 615 """A filecontext object represents the common logic for its children:
675 616 filectx: read-only access to a filerevision that is already present
676 617 in the repo,
677 618 workingfilectx: a filecontext that represents files from the working
678 619 directory,
679 620 memfilectx: a filecontext that represents files in-memory,
680 621 overlayfilectx: duplicate another filecontext with some fields overridden.
681 622 """
682 623 @propertycache
683 624 def _filelog(self):
684 625 return self._repo.file(self._path)
685 626
686 627 @propertycache
687 628 def _changeid(self):
688 629 if r'_changeid' in self.__dict__:
689 630 return self._changeid
690 631 elif r'_changectx' in self.__dict__:
691 632 return self._changectx.rev()
692 633 elif r'_descendantrev' in self.__dict__:
693 634 # this file context was created from a revision with a known
694 635 # descendant, we can (lazily) correct for linkrev aliases
695 636 return self._adjustlinkrev(self._descendantrev)
696 637 else:
697 638 return self._filelog.linkrev(self._filerev)
698 639
699 640 @propertycache
700 641 def _filenode(self):
701 642 if r'_fileid' in self.__dict__:
702 643 return self._filelog.lookup(self._fileid)
703 644 else:
704 645 return self._changectx.filenode(self._path)
705 646
706 647 @propertycache
707 648 def _filerev(self):
708 649 return self._filelog.rev(self._filenode)
709 650
710 651 @propertycache
711 652 def _repopath(self):
712 653 return self._path
713 654
714 655 def __nonzero__(self):
715 656 try:
716 657 self._filenode
717 658 return True
718 659 except error.LookupError:
719 660 # file is missing
720 661 return False
721 662
722 663 __bool__ = __nonzero__
723 664
724 665 def __bytes__(self):
725 666 try:
726 667 return "%s@%s" % (self.path(), self._changectx)
727 668 except error.LookupError:
728 669 return "%s@???" % self.path()
729 670
730 671 __str__ = encoding.strmethod(__bytes__)
731 672
732 673 def __repr__(self):
733 674 return r"<%s %s>" % (type(self).__name__, str(self))
734 675
735 676 def __hash__(self):
736 677 try:
737 678 return hash((self._path, self._filenode))
738 679 except AttributeError:
739 680 return id(self)
740 681
741 682 def __eq__(self, other):
742 683 try:
743 684 return (type(self) == type(other) and self._path == other._path
744 685 and self._filenode == other._filenode)
745 686 except AttributeError:
746 687 return False
747 688
748 689 def __ne__(self, other):
749 690 return not (self == other)
750 691
751 692 def filerev(self):
752 693 return self._filerev
753 694 def filenode(self):
754 695 return self._filenode
755 696 @propertycache
756 697 def _flags(self):
757 698 return self._changectx.flags(self._path)
758 699 def flags(self):
759 700 return self._flags
760 701 def filelog(self):
761 702 return self._filelog
762 703 def rev(self):
763 704 return self._changeid
764 705 def linkrev(self):
765 706 return self._filelog.linkrev(self._filerev)
766 707 def node(self):
767 708 return self._changectx.node()
768 709 def hex(self):
769 710 return self._changectx.hex()
770 711 def user(self):
771 712 return self._changectx.user()
772 713 def date(self):
773 714 return self._changectx.date()
774 715 def files(self):
775 716 return self._changectx.files()
776 717 def description(self):
777 718 return self._changectx.description()
778 719 def branch(self):
779 720 return self._changectx.branch()
780 721 def extra(self):
781 722 return self._changectx.extra()
782 723 def phase(self):
783 724 return self._changectx.phase()
784 725 def phasestr(self):
785 726 return self._changectx.phasestr()
786 727 def obsolete(self):
787 728 return self._changectx.obsolete()
788 729 def instabilities(self):
789 730 return self._changectx.instabilities()
790 731 def manifest(self):
791 732 return self._changectx.manifest()
792 733 def changectx(self):
793 734 return self._changectx
794 735 def renamed(self):
795 736 return self._copied
796 737 def repo(self):
797 738 return self._repo
798 739 def size(self):
799 740 return len(self.data())
800 741
801 742 def path(self):
802 743 return self._path
803 744
804 745 def isbinary(self):
805 746 try:
806 747 return stringutil.binary(self.data())
807 748 except IOError:
808 749 return False
809 750 def isexec(self):
810 751 return 'x' in self.flags()
811 752 def islink(self):
812 753 return 'l' in self.flags()
813 754
814 755 def isabsent(self):
815 756 """whether this filectx represents a file not in self._changectx
816 757
817 758 This is mainly for merge code to detect change/delete conflicts. This is
818 759 expected to be True for all subclasses of basectx."""
819 760 return False
820 761
821 762 _customcmp = False
822 763 def cmp(self, fctx):
823 764 """compare with other file context
824 765
825 766 returns True if different than fctx.
826 767 """
827 768 if fctx._customcmp:
828 769 return fctx.cmp(self)
829 770
830 771 if (fctx._filenode is None
831 772 and (self._repo._encodefilterpats
832 773 # if file data starts with '\1\n', empty metadata block is
833 774 # prepended, which adds 4 bytes to filelog.size().
834 775 or self.size() - 4 == fctx.size())
835 776 or self.size() == fctx.size()):
836 777 return self._filelog.cmp(self._filenode, fctx.data())
837 778
838 779 return True
839 780
840 781 def _adjustlinkrev(self, srcrev, inclusive=False):
841 782 """return the first ancestor of <srcrev> introducing <fnode>
842 783
843 784 If the linkrev of the file revision does not point to an ancestor of
844 785 srcrev, we'll walk down the ancestors until we find one introducing
845 786 this file revision.
846 787
847 788 :srcrev: the changeset revision we search ancestors from
848 789 :inclusive: if true, the src revision will also be checked
849 790 """
850 791 repo = self._repo
851 792 cl = repo.unfiltered().changelog
852 793 mfl = repo.manifestlog
853 794 # fetch the linkrev
854 795 lkr = self.linkrev()
855 796 # hack to reuse ancestor computation when searching for renames
856 797 memberanc = getattr(self, '_ancestrycontext', None)
857 798 iteranc = None
858 799 if srcrev is None:
859 800 # wctx case, used by workingfilectx during mergecopy
860 801 revs = [p.rev() for p in self._repo[None].parents()]
861 802 inclusive = True # we skipped the real (revless) source
862 803 else:
863 804 revs = [srcrev]
864 805 if memberanc is None:
865 806 memberanc = iteranc = cl.ancestors(revs, lkr,
866 807 inclusive=inclusive)
867 808 # check if this linkrev is an ancestor of srcrev
868 809 if lkr not in memberanc:
869 810 if iteranc is None:
870 811 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
871 812 fnode = self._filenode
872 813 path = self._path
873 814 for a in iteranc:
874 815 ac = cl.read(a) # get changeset data (we avoid object creation)
875 816 if path in ac[3]: # checking the 'files' field.
876 817 # The file has been touched, check if the content is
877 818 # similar to the one we search for.
878 819 if fnode == mfl[ac[0]].readfast().get(path):
879 820 return a
880 821 # In theory, we should never get out of that loop without a result.
881 822 # But if the manifest uses a buggy file revision (not a child of the
882 823 # one it replaces) we could. Such a buggy situation will likely
883 824 # result in a crash somewhere else at some point.
884 825 return lkr
885 826
886 827 def introrev(self):
887 828 """return the rev of the changeset which introduced this file revision
888 829
889 830 This method is different from linkrev because it takes into account the
890 831 changeset the filectx was created from. It ensures the returned
891 832 revision is one of its ancestors. This prevents bugs from
892 833 'linkrev-shadowing' when a file revision is used by multiple
893 834 changesets.
894 835 """
895 836 lkr = self.linkrev()
896 837 attrs = vars(self)
897 838 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
898 839 if noctx or self.rev() == lkr:
899 840 return self.linkrev()
900 841 return self._adjustlinkrev(self.rev(), inclusive=True)
901 842
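To illustrate the distinction drawn in the docstrings above (a sketch; the file path is hypothetical):

    fctx = repo['tip']['mercurial/context.py']
    fctx.linkrev()    # revision recorded in the filelog, may be shadowed
    fctx.introrev()   # revision that actually introduced this file revision,
                      # constrained to ancestors of the originating context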
902 843 def introfilectx(self):
903 844 """Return filectx having identical contents, but pointing to the
904 845 changeset revision where this filectx was introduced"""
905 846 introrev = self.introrev()
906 847 if self.rev() == introrev:
907 848 return self
908 849 return self.filectx(self.filenode(), changeid=introrev)
909 850
910 851 def _parentfilectx(self, path, fileid, filelog):
911 852 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
912 853 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
913 854 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
914 855 # If self is associated with a changeset (probably explicitly
915 856 # fed), ensure the created filectx is associated with a
916 857 # changeset that is an ancestor of self.changectx.
917 858 # This lets us later use _adjustlinkrev to get a correct link.
918 859 fctx._descendantrev = self.rev()
919 860 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
920 861 elif r'_descendantrev' in vars(self):
921 862 # Otherwise propagate _descendantrev if we have one associated.
922 863 fctx._descendantrev = self._descendantrev
923 864 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
924 865 return fctx
925 866
926 867 def parents(self):
927 868 _path = self._path
928 869 fl = self._filelog
929 870 parents = self._filelog.parents(self._filenode)
930 871 pl = [(_path, node, fl) for node in parents if node != nullid]
931 872
932 873 r = fl.renamed(self._filenode)
933 874 if r:
934 875 # - In the simple rename case, both parents are nullid, pl is empty.
935 876 # - In case of merge, only one of the parents is nullid and should
936 877 # be replaced with the rename information. This parent is -always-
937 878 # the first one.
938 879 #
939 880 # As nullid has always been filtered out in the previous list
940 881 # comprehension, inserting at 0 will always result in replacing the
941 882 # first nullid parent with the rename information.
942 883 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
943 884
944 885 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
945 886
946 887 def p1(self):
947 888 return self.parents()[0]
948 889
949 890 def p2(self):
950 891 p = self.parents()
951 892 if len(p) == 2:
952 893 return p[1]
953 894 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
954 895
955 896 def annotate(self, follow=False, skiprevs=None, diffopts=None):
956 897 """Returns a list of annotateline objects for each line in the file
957 898
958 899 - line.fctx is the filectx of the node where that line was last changed
959 900 - line.lineno is the line number at the first appearance in the managed
960 901 file
961 902 - line.text is the data on that line (including newline character)
962 903 """
963 904 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
964 905
965 906 def parents(f):
966 907 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
967 908 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
968 909 # from the topmost introrev (= srcrev) down to p.linkrev() if it
969 910 # isn't an ancestor of the srcrev.
970 911 f._changeid
971 912 pl = f.parents()
972 913
973 914 # Don't return renamed parents if we aren't following.
974 915 if not follow:
975 916 pl = [p for p in pl if p.path() == f.path()]
976 917
977 918 # renamed filectx won't have a filelog yet, so set it
978 919 # from the cache to save time
979 920 for p in pl:
980 921 if not r'_filelog' in p.__dict__:
981 922 p._filelog = getlog(p.path())
982 923
983 924 return pl
984 925
985 926 # use linkrev to find the first changeset where self appeared
986 927 base = self.introfilectx()
987 928 if getattr(base, '_ancestrycontext', None) is None:
988 929 cl = self._repo.changelog
989 930 if base.rev() is None:
990 931 # wctx is not inclusive, but works because _ancestrycontext
991 932 # is used to test filelog revisions
992 933 ac = cl.ancestors([p.rev() for p in base.parents()],
993 934 inclusive=True)
994 935 else:
995 936 ac = cl.ancestors([base.rev()], inclusive=True)
996 937 base._ancestrycontext = ac
997 938
998 939 return dagop.annotate(base, parents, skiprevs=skiprevs,
999 940 diffopts=diffopts)
1000 941
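A hedged usage sketch of the annotate API described above (the file path and formatting are illustrative):

    fctx = repo['tip']['mercurial/context.py']
    for line in fctx.annotate(follow=True):
        # line.text already includes the trailing newline
        repo.ui.write("%5d:%4d: %s"
                      % (line.fctx.rev(), line.lineno, line.text))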
1001 942 def ancestors(self, followfirst=False):
1002 943 visit = {}
1003 944 c = self
1004 945 if followfirst:
1005 946 cut = 1
1006 947 else:
1007 948 cut = None
1008 949
1009 950 while True:
1010 951 for parent in c.parents()[:cut]:
1011 952 visit[(parent.linkrev(), parent.filenode())] = parent
1012 953 if not visit:
1013 954 break
1014 955 c = visit.pop(max(visit))
1015 956 yield c
1016 957
1017 958 def decodeddata(self):
1018 959 """Returns `data()` after running repository decoding filters.
1019 960
1020 961 This is often equivalent to how the data would be expressed on disk.
1021 962 """
1022 963 return self._repo.wwritedata(self.path(), self.data())
1023 964
1024 965 class filectx(basefilectx):
1025 966 """A filecontext object makes access to data related to a particular
1026 967 filerevision convenient."""
1027 968 def __init__(self, repo, path, changeid=None, fileid=None,
1028 969 filelog=None, changectx=None):
1029 970 """changeid can be a changeset revision, node, or tag.
1030 971 fileid can be a file revision or node."""
1031 972 self._repo = repo
1032 973 self._path = path
1033 974
1034 975 assert (changeid is not None
1035 976 or fileid is not None
1036 977 or changectx is not None), \
1037 978 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1038 979 % (changeid, fileid, changectx))
1039 980
1040 981 if filelog is not None:
1041 982 self._filelog = filelog
1042 983
1043 984 if changeid is not None:
1044 985 self._changeid = changeid
1045 986 if changectx is not None:
1046 987 self._changectx = changectx
1047 988 if fileid is not None:
1048 989 self._fileid = fileid
1049 990
1050 991 @propertycache
1051 992 def _changectx(self):
1052 993 try:
1053 994 return changectx(self._repo, self._changeid)
1054 995 except error.FilteredRepoLookupError:
1055 996 # Linkrev may point to any revision in the repository. When the
1056 997 # repository is filtered this may lead to `filectx` trying to build
1057 998 # `changectx` for filtered revision. In such case we fallback to
1058 999 # creating `changectx` on the unfiltered version of the repository.
1059 1000 # This fallback should not be an issue because `changectx` from
1060 1001 # `filectx` are not used in complex operations that care about
1061 1002 # filtering.
1062 1003 #
1063 1004 # This fallback is a cheap and dirty fix that prevents several
1064 1005 # crashes. It does not ensure the behavior is correct. However the
1065 1006 # behavior was not correct before filtering either and "incorrect
1066 1007 # behavior" is seen as better than "crash"
1067 1008 #
1068 1009 # Linkrevs have several serious problems with filtering that are
1069 1010 # complicated to solve. Proper handling of the issue here should be
1070 1011 # considered when solving the linkrev issue is on the table.
1071 1012 return changectx(self._repo.unfiltered(), self._changeid)
1072 1013
1073 1014 def filectx(self, fileid, changeid=None):
1074 1015 '''opens an arbitrary revision of the file without
1075 1016 opening a new filelog'''
1076 1017 return filectx(self._repo, self._path, fileid=fileid,
1077 1018 filelog=self._filelog, changeid=changeid)
1078 1019
1079 1020 def rawdata(self):
1080 1021 return self._filelog.revision(self._filenode, raw=True)
1081 1022
1082 1023 def rawflags(self):
1083 1024 """low-level revlog flags"""
1084 1025 return self._filelog.flags(self._filerev)
1085 1026
1086 1027 def data(self):
1087 1028 try:
1088 1029 return self._filelog.read(self._filenode)
1089 1030 except error.CensoredNodeError:
1090 1031 if self._repo.ui.config("censor", "policy") == "ignore":
1091 1032 return ""
1092 1033 raise error.Abort(_("censored node: %s") % short(self._filenode),
1093 1034 hint=_("set censor.policy to ignore errors"))
1094 1035
1095 1036 def size(self):
1096 1037 return self._filelog.size(self._filerev)
1097 1038
1098 1039 @propertycache
1099 1040 def _copied(self):
1100 1041 """check if file was actually renamed in this changeset revision
1101 1042
1102 1043 If a rename is logged in the file revision, we report a copy for the
1103 1044 changeset only if the file revision's linkrev points back to the changeset
1104 1045 in question or both changeset parents contain different file revisions.
1105 1046 """
1106 1047
1107 1048 renamed = self._filelog.renamed(self._filenode)
1108 1049 if not renamed:
1109 1050 return renamed
1110 1051
1111 1052 if self.rev() == self.linkrev():
1112 1053 return renamed
1113 1054
1114 1055 name = self.path()
1115 1056 fnode = self._filenode
1116 1057 for p in self._changectx.parents():
1117 1058 try:
1118 1059 if fnode == p.filenode(name):
1119 1060 return None
1120 1061 except error.LookupError:
1121 1062 pass
1122 1063 return renamed
1123 1064
1124 1065 def children(self):
1125 1066 # hard for renames
1126 1067 c = self._filelog.children(self._filenode)
1127 1068 return [filectx(self._repo, self._path, fileid=x,
1128 1069 filelog=self._filelog) for x in c]
1129 1070
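For orientation, a hedged sketch of typical read-only access through filectx (the path is a placeholder):

    fctx = repo['tip']['README']       # hypothetical tracked file
    data = fctx.data()                 # contents at that revision
    repo.ui.write("%s is %d bytes\n" % (fctx.path(), fctx.size()))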
1130 1071 class committablectx(basectx):
1131 1072 """A committablectx object provides common functionality for a context that
1132 1073 wants the ability to commit, e.g. workingctx or memctx."""
1133 1074 def __init__(self, repo, text="", user=None, date=None, extra=None,
1134 1075 changes=None):
1135 1076 super(committablectx, self).__init__(repo)
1136 1077 self._rev = None
1137 1078 self._node = None
1138 1079 self._text = text
1139 1080 if date:
1140 1081 self._date = dateutil.parsedate(date)
1141 1082 if user:
1142 1083 self._user = user
1143 1084 if changes:
1144 1085 self._status = changes
1145 1086
1146 1087 self._extra = {}
1147 1088 if extra:
1148 1089 self._extra = extra.copy()
1149 1090 if 'branch' not in self._extra:
1150 1091 try:
1151 1092 branch = encoding.fromlocal(self._repo.dirstate.branch())
1152 1093 except UnicodeDecodeError:
1153 1094 raise error.Abort(_('branch name not in UTF-8!'))
1154 1095 self._extra['branch'] = branch
1155 1096 if self._extra['branch'] == '':
1156 1097 self._extra['branch'] = 'default'
1157 1098
1158 1099 def __bytes__(self):
1159 1100 return bytes(self._parents[0]) + "+"
1160 1101
1161 1102 __str__ = encoding.strmethod(__bytes__)
1162 1103
1163 1104 def __nonzero__(self):
1164 1105 return True
1165 1106
1166 1107 __bool__ = __nonzero__
1167 1108
1168 1109 def _buildflagfunc(self):
1169 1110 # Create a fallback function for getting file flags when the
1170 1111 # filesystem doesn't support them
1171 1112
1172 1113 copiesget = self._repo.dirstate.copies().get
1173 1114 parents = self.parents()
1174 1115 if len(parents) < 2:
1175 1116 # when we have one parent, it's easy: copy from parent
1176 1117 man = parents[0].manifest()
1177 1118 def func(f):
1178 1119 f = copiesget(f, f)
1179 1120 return man.flags(f)
1180 1121 else:
1181 1122 # merges are tricky: we try to reconstruct the unstored
1182 1123 # result from the merge (issue1802)
1183 1124 p1, p2 = parents
1184 1125 pa = p1.ancestor(p2)
1185 1126 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1186 1127
1187 1128 def func(f):
1188 1129 f = copiesget(f, f) # may be wrong for merges with copies
1189 1130 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1190 1131 if fl1 == fl2:
1191 1132 return fl1
1192 1133 if fl1 == fla:
1193 1134 return fl2
1194 1135 if fl2 == fla:
1195 1136 return fl1
1196 1137 return '' # punt for conflicts
1197 1138
1198 1139 return func
1199 1140
1200 1141 @propertycache
1201 1142 def _flagfunc(self):
1202 1143 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1203 1144
1204 1145 @propertycache
1205 1146 def _status(self):
1206 1147 return self._repo.status()
1207 1148
1208 1149 @propertycache
1209 1150 def _user(self):
1210 1151 return self._repo.ui.username()
1211 1152
1212 1153 @propertycache
1213 1154 def _date(self):
1214 1155 ui = self._repo.ui
1215 1156 date = ui.configdate('devel', 'default-date')
1216 1157 if date is None:
1217 1158 date = dateutil.makedate()
1218 1159 return date
1219 1160
1220 1161 def subrev(self, subpath):
1221 1162 return None
1222 1163
1223 1164 def manifestnode(self):
1224 1165 return None
1225 1166 def user(self):
1226 1167 return self._user or self._repo.ui.username()
1227 1168 def date(self):
1228 1169 return self._date
1229 1170 def description(self):
1230 1171 return self._text
1231 1172 def files(self):
1232 1173 return sorted(self._status.modified + self._status.added +
1233 1174 self._status.removed)
1234 1175
1235 1176 def modified(self):
1236 1177 return self._status.modified
1237 1178 def added(self):
1238 1179 return self._status.added
1239 1180 def removed(self):
1240 1181 return self._status.removed
1241 1182 def deleted(self):
1242 1183 return self._status.deleted
1243 1184 def branch(self):
1244 1185 return encoding.tolocal(self._extra['branch'])
1245 1186 def closesbranch(self):
1246 1187 return 'close' in self._extra
1247 1188 def extra(self):
1248 1189 return self._extra
1249 1190
1250 1191 def isinmemory(self):
1251 1192 return False
1252 1193
1253 1194 def tags(self):
1254 1195 return []
1255 1196
1256 1197 def bookmarks(self):
1257 1198 b = []
1258 1199 for p in self.parents():
1259 1200 b.extend(p.bookmarks())
1260 1201 return b
1261 1202
1262 1203 def phase(self):
1263 1204 phase = phases.draft # default phase to draft
1264 1205 for p in self.parents():
1265 1206 phase = max(phase, p.phase())
1266 1207 return phase
1267 1208
1268 1209 def hidden(self):
1269 1210 return False
1270 1211
1271 1212 def children(self):
1272 1213 return []
1273 1214
1274 1215 def flags(self, path):
1275 1216 if r'_manifest' in self.__dict__:
1276 1217 try:
1277 1218 return self._manifest.flags(path)
1278 1219 except KeyError:
1279 1220 return ''
1280 1221
1281 1222 try:
1282 1223 return self._flagfunc(path)
1283 1224 except OSError:
1284 1225 return ''
1285 1226
1286 1227 def ancestor(self, c2):
1287 1228 """return the "best" ancestor context of self and c2"""
1288 1229 return self._parents[0].ancestor(c2) # punt on two parents for now
1289 1230
1290 1231 def walk(self, match):
1291 1232 '''Generates matching file names.'''
1292 1233 return sorted(self._repo.dirstate.walk(match,
1293 1234 subrepos=sorted(self.substate),
1294 1235 unknown=True, ignored=False))
1295 1236
1296 1237 def matches(self, match):
1297 1238 return sorted(self._repo.dirstate.matches(match))
1298 1239
1299 1240 def ancestors(self):
1300 1241 for p in self._parents:
1301 1242 yield p
1302 1243 for a in self._repo.changelog.ancestors(
1303 1244 [p.rev() for p in self._parents]):
1304 1245 yield changectx(self._repo, a)
1305 1246
1306 1247 def markcommitted(self, node):
1307 1248 """Perform post-commit cleanup necessary after committing this ctx
1308 1249
1309 1250 Specifically, this updates backing stores this working context
1310 1251 wraps to reflect the fact that the changes reflected by this
1311 1252 workingctx have been committed. For example, it marks
1312 1253 modified and added files as normal in the dirstate.
1313 1254
1314 1255 """
1315 1256
1316 1257 with self._repo.dirstate.parentchange():
1317 1258 for f in self.modified() + self.added():
1318 1259 self._repo.dirstate.normal(f)
1319 1260 for f in self.removed():
1320 1261 self._repo.dirstate.drop(f)
1321 1262 self._repo.dirstate.setparents(node)
1322 1263
1323 1264 # write changes out explicitly, because nesting wlock at
1324 1265 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1325 1266 # from immediately doing so for subsequent changing files
1326 1267 self._repo.dirstate.write(self._repo.currenttransaction())
1327 1268
1328 1269 def dirty(self, missing=False, merge=True, branch=True):
1329 1270 return False
1330 1271
1331 1272 class workingctx(committablectx):
1332 1273 """A workingctx object makes access to data related to
1333 1274 the current working directory convenient.
1334 1275 date - any valid date string or (unixtime, offset), or None.
1335 1276 user - username string, or None.
1336 1277 extra - a dictionary of extra values, or None.
1337 1278 changes - a list of file lists as returned by localrepo.status()
1338 1279 or None to use the repository status.
1339 1280 """
1340 1281 def __init__(self, repo, text="", user=None, date=None, extra=None,
1341 1282 changes=None):
1342 1283 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1343 1284
1344 1285 def __iter__(self):
1345 1286 d = self._repo.dirstate
1346 1287 for f in d:
1347 1288 if d[f] != 'r':
1348 1289 yield f
1349 1290
1350 1291 def __contains__(self, key):
1351 1292 return self._repo.dirstate[key] not in "?r"
1352 1293
1353 1294 def hex(self):
1354 1295 return hex(wdirid)
1355 1296
1356 1297 @propertycache
1357 1298 def _parents(self):
1358 1299 p = self._repo.dirstate.parents()
1359 1300 if p[1] == nullid:
1360 1301 p = p[:-1]
1361 1302 return [changectx(self._repo, x) for x in p]
1362 1303
1363 1304 def _fileinfo(self, path):
1364 1305 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1365 1306 self._manifest
1366 1307 return super(workingctx, self)._fileinfo(path)
1367 1308
1368 1309 def filectx(self, path, filelog=None):
1369 1310 """get a file context from the working directory"""
1370 1311 return workingfilectx(self._repo, path, workingctx=self,
1371 1312 filelog=filelog)
1372 1313
1373 1314 def dirty(self, missing=False, merge=True, branch=True):
1374 1315 "check whether a working directory is modified"
1375 1316 # check subrepos first
1376 1317 for s in sorted(self.substate):
1377 1318 if self.sub(s).dirty(missing=missing):
1378 1319 return True
1379 1320 # check current working dir
1380 1321 return ((merge and self.p2()) or
1381 1322 (branch and self.branch() != self.p1().branch()) or
1382 1323 self.modified() or self.added() or self.removed() or
1383 1324 (missing and self.deleted()))
1384 1325
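A common, hedged usage pattern for the working-directory check above (assuming an opened repo, and using error and _ as imported at the top of this module):

    wctx = repo[None]                  # working directory context
    if wctx.dirty(missing=True):
        raise error.Abort(_('uncommitted changes'))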
1385 1326 def add(self, list, prefix=""):
1386 1327 with self._repo.wlock():
1387 1328 ui, ds = self._repo.ui, self._repo.dirstate
1388 1329 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1389 1330 rejected = []
1390 1331 lstat = self._repo.wvfs.lstat
1391 1332 for f in list:
1392 1333 # ds.pathto() returns an absolute file when this is invoked from
1393 1334 # the keyword extension. That gets flagged as non-portable on
1394 1335 # Windows, since it contains the drive letter and colon.
1395 1336 scmutil.checkportable(ui, os.path.join(prefix, f))
1396 1337 try:
1397 1338 st = lstat(f)
1398 1339 except OSError:
1399 1340 ui.warn(_("%s does not exist!\n") % uipath(f))
1400 1341 rejected.append(f)
1401 1342 continue
1402 1343 if st.st_size > 10000000:
1403 1344 ui.warn(_("%s: up to %d MB of RAM may be required "
1404 1345 "to manage this file\n"
1405 1346 "(use 'hg revert %s' to cancel the "
1406 1347 "pending addition)\n")
1407 1348 % (f, 3 * st.st_size // 1000000, uipath(f)))
1408 1349 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1409 1350 ui.warn(_("%s not added: only files and symlinks "
1410 1351 "supported currently\n") % uipath(f))
1411 1352 rejected.append(f)
1412 1353 elif ds[f] in 'amn':
1413 1354 ui.warn(_("%s already tracked!\n") % uipath(f))
1414 1355 elif ds[f] == 'r':
1415 1356 ds.normallookup(f)
1416 1357 else:
1417 1358 ds.add(f)
1418 1359 return rejected
1419 1360
1420 1361 def forget(self, files, prefix=""):
1421 1362 with self._repo.wlock():
1422 1363 ds = self._repo.dirstate
1423 1364 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1424 1365 rejected = []
1425 1366 for f in files:
1426 1367 if f not in self._repo.dirstate:
1427 1368 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1428 1369 rejected.append(f)
1429 1370 elif self._repo.dirstate[f] != 'a':
1430 1371 self._repo.dirstate.remove(f)
1431 1372 else:
1432 1373 self._repo.dirstate.drop(f)
1433 1374 return rejected
1434 1375
1435 1376 def undelete(self, list):
1436 1377 pctxs = self.parents()
1437 1378 with self._repo.wlock():
1438 1379 ds = self._repo.dirstate
1439 1380 for f in list:
1440 1381 if self._repo.dirstate[f] != 'r':
1441 1382 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1442 1383 else:
1443 1384 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1444 1385 t = fctx.data()
1445 1386 self._repo.wwrite(f, t, fctx.flags())
1446 1387 self._repo.dirstate.normal(f)
1447 1388
1448 1389 def copy(self, source, dest):
1449 1390 try:
1450 1391 st = self._repo.wvfs.lstat(dest)
1451 1392 except OSError as err:
1452 1393 if err.errno != errno.ENOENT:
1453 1394 raise
1454 1395 self._repo.ui.warn(_("%s does not exist!\n")
1455 1396 % self._repo.dirstate.pathto(dest))
1456 1397 return
1457 1398 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1458 1399 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1459 1400 "symbolic link\n")
1460 1401 % self._repo.dirstate.pathto(dest))
1461 1402 else:
1462 1403 with self._repo.wlock():
1463 1404 if self._repo.dirstate[dest] in '?':
1464 1405 self._repo.dirstate.add(dest)
1465 1406 elif self._repo.dirstate[dest] in 'r':
1466 1407 self._repo.dirstate.normallookup(dest)
1467 1408 self._repo.dirstate.copy(source, dest)
1468 1409
1469 1410 def match(self, pats=None, include=None, exclude=None, default='glob',
1470 1411 listsubrepos=False, badfn=None):
1471 1412 r = self._repo
1472 1413
1473 1414 # Only a case insensitive filesystem needs magic to translate user input
1474 1415 # to actual case in the filesystem.
1475 1416 icasefs = not util.fscasesensitive(r.root)
1476 1417 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1477 1418 default, auditor=r.auditor, ctx=self,
1478 1419 listsubrepos=listsubrepos, badfn=badfn,
1479 1420 icasefs=icasefs)
1480 1421
1481 1422 def _filtersuspectsymlink(self, files):
1482 1423 if not files or self._repo.dirstate._checklink:
1483 1424 return files
1484 1425
1485 1426 # Symlink placeholders may get non-symlink-like contents
1486 1427 # via user error or dereferencing by NFS or Samba servers,
1487 1428 # so we filter out any placeholders that don't look like a
1488 1429 # symlink
1489 1430 sane = []
1490 1431 for f in files:
1491 1432 if self.flags(f) == 'l':
1492 1433 d = self[f].data()
1493 1434 if (d == '' or len(d) >= 1024 or '\n' in d
1494 1435 or stringutil.binary(d)):
1495 1436 self._repo.ui.debug('ignoring suspect symlink placeholder'
1496 1437 ' "%s"\n' % f)
1497 1438 continue
1498 1439 sane.append(f)
1499 1440 return sane
1500 1441
1501 1442 def _checklookup(self, files):
1502 1443 # check for any possibly clean files
1503 1444 if not files:
1504 1445 return [], [], []
1505 1446
1506 1447 modified = []
1507 1448 deleted = []
1508 1449 fixup = []
1509 1450 pctx = self._parents[0]
1510 1451 # do a full compare of any files that might have changed
1511 1452 for f in sorted(files):
1512 1453 try:
1513 1454 # This will return True for a file that got replaced by a
1514 1455 # directory in the interim, but fixing that is pretty hard.
1515 1456 if (f not in pctx or self.flags(f) != pctx.flags(f)
1516 1457 or pctx[f].cmp(self[f])):
1517 1458 modified.append(f)
1518 1459 else:
1519 1460 fixup.append(f)
1520 1461 except (IOError, OSError):
1521 1462 # A file became inaccessible in between? Mark it as deleted,
1522 1463 # matching dirstate behavior (issue5584).
1523 1464 # The dirstate has more complex behavior around whether a
1524 1465 # missing file matches a directory, etc, but we don't need to
1525 1466 # bother with that: if f has made it to this point, we're sure
1526 1467 # it's in the dirstate.
1527 1468 deleted.append(f)
1528 1469
1529 1470 return modified, deleted, fixup
1530 1471
1531 1472 def _poststatusfixup(self, status, fixup):
1532 1473 """update dirstate for files that are actually clean"""
1533 1474 poststatus = self._repo.postdsstatus()
1534 1475 if fixup or poststatus:
1535 1476 try:
1536 1477 oldid = self._repo.dirstate.identity()
1537 1478
1538 1479 # updating the dirstate is optional
1539 1480 # so we don't wait on the lock
1540 1481 # wlock can invalidate the dirstate, so cache normal _after_
1541 1482 # taking the lock
1542 1483 with self._repo.wlock(False):
1543 1484 if self._repo.dirstate.identity() == oldid:
1544 1485 if fixup:
1545 1486 normal = self._repo.dirstate.normal
1546 1487 for f in fixup:
1547 1488 normal(f)
1548 1489 # write changes out explicitly, because nesting
1549 1490 # wlock at runtime may prevent 'wlock.release()'
1550 1491 # after this block from doing so for subsequent
1551 1492 # changing files
1552 1493 tr = self._repo.currenttransaction()
1553 1494 self._repo.dirstate.write(tr)
1554 1495
1555 1496 if poststatus:
1556 1497 for ps in poststatus:
1557 1498 ps(self, status)
1558 1499 else:
1559 1500 # in this case, writing changes out breaks
1560 1501 # consistency, because .hg/dirstate was
1561 1502 # already changed simultaneously after last
1562 1503 # caching (see also issue5584 for detail)
1563 1504 self._repo.ui.debug('skip updating dirstate: '
1564 1505 'identity mismatch\n')
1565 1506 except error.LockError:
1566 1507 pass
1567 1508 finally:
1568 1509 # Even if the wlock couldn't be grabbed, clear out the list.
1569 1510 self._repo.clearpostdsstatus()
1570 1511
1571 1512 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1572 1513 '''Gets the status from the dirstate -- internal use only.'''
1573 1514 subrepos = []
1574 1515 if '.hgsub' in self:
1575 1516 subrepos = sorted(self.substate)
1576 1517 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1577 1518 clean=clean, unknown=unknown)
1578 1519
1579 1520 # check for any possibly clean files
1580 1521 fixup = []
1581 1522 if cmp:
1582 1523 modified2, deleted2, fixup = self._checklookup(cmp)
1583 1524 s.modified.extend(modified2)
1584 1525 s.deleted.extend(deleted2)
1585 1526
1586 1527 if fixup and clean:
1587 1528 s.clean.extend(fixup)
1588 1529
1589 1530 self._poststatusfixup(s, fixup)
1590 1531
1591 1532 if match.always():
1592 1533 # cache for performance
1593 1534 if s.unknown or s.ignored or s.clean:
1594 1535 # "_status" is cached with list*=False in the normal route
1595 1536 self._status = scmutil.status(s.modified, s.added, s.removed,
1596 1537 s.deleted, [], [], [])
1597 1538 else:
1598 1539 self._status = s
1599 1540
1600 1541 return s
1601 1542
1602 1543 @propertycache
1603 1544 def _manifest(self):
1604 1545 """generate a manifest corresponding to the values in self._status
1605 1546
1606 1547 This reuses the file nodeid from the parent, but we use special node
1607 1548 identifiers for added and modified files. This is used by manifest
1608 1549 merge to see that files are different and by update logic to avoid
1609 1550 deleting newly added files.
1610 1551 """
1611 1552 return self._buildstatusmanifest(self._status)
1612 1553
1613 1554 def _buildstatusmanifest(self, status):
1614 1555 """Builds a manifest that includes the given status results."""
1615 1556 parents = self.parents()
1616 1557
1617 1558 man = parents[0].manifest().copy()
1618 1559
1619 1560 ff = self._flagfunc
1620 1561 for i, l in ((addednodeid, status.added),
1621 1562 (modifiednodeid, status.modified)):
1622 1563 for f in l:
1623 1564 man[f] = i
1624 1565 try:
1625 1566 man.setflag(f, ff(f))
1626 1567 except OSError:
1627 1568 pass
1628 1569
1629 1570 for f in status.deleted + status.removed:
1630 1571 if f in man:
1631 1572 del man[f]
1632 1573
1633 1574 return man
1634 1575
1635 1576 def _buildstatus(self, other, s, match, listignored, listclean,
1636 1577 listunknown):
1637 1578 """build a status with respect to another context
1638 1579
1639 1580 This includes logic for maintaining the fast path of status when
1640 1581 comparing the working directory against its parent, which is to skip
1641 1582 building a new manifest if self (working directory) is not comparing
1642 1583 against its parent (repo['.']).
1643 1584 """
1644 1585 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1645 1586 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1646 1587 # might have accidentally ended up with the entire contents of the file
1647 1588 # they are supposed to be linking to.
1648 1589 s.modified[:] = self._filtersuspectsymlink(s.modified)
1649 1590 if other != self._repo['.']:
1650 1591 s = super(workingctx, self)._buildstatus(other, s, match,
1651 1592 listignored, listclean,
1652 1593 listunknown)
1653 1594 return s
1654 1595
1655 1596 def _matchstatus(self, other, match):
1656 1597 """override the match method with a filter for directory patterns
1657 1598
1658 1599 We use inheritance to customize the match.bad method only in cases of
1659 1600 workingctx since it belongs only to the working directory when
1660 1601 comparing against the parent changeset.
1661 1602
1662 1603 If we aren't comparing against the working directory's parent, then we
1663 1604 just use the default match object sent to us.
1664 1605 """
1665 1606 if other != self._repo['.']:
1666 1607 def bad(f, msg):
1667 1608 # 'f' may be a directory pattern from 'match.files()',
1668 1609 # so 'f not in ctx1' is not enough
1669 1610 if f not in other and not other.hasdir(f):
1670 1611 self._repo.ui.warn('%s: %s\n' %
1671 1612 (self._repo.dirstate.pathto(f), msg))
1672 1613 match.bad = bad
1673 1614 return match
1674 1615
1675 1616 def markcommitted(self, node):
1676 1617 super(workingctx, self).markcommitted(node)
1677 1618
1678 1619 sparse.aftercommit(self._repo, node)
1679 1620
1680 1621 class committablefilectx(basefilectx):
1681 1622 """A committablefilectx provides common functionality for a file context
1682 1623 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1683 1624 def __init__(self, repo, path, filelog=None, ctx=None):
1684 1625 self._repo = repo
1685 1626 self._path = path
1686 1627 self._changeid = None
1687 1628 self._filerev = self._filenode = None
1688 1629
1689 1630 if filelog is not None:
1690 1631 self._filelog = filelog
1691 1632 if ctx:
1692 1633 self._changectx = ctx
1693 1634
1694 1635 def __nonzero__(self):
1695 1636 return True
1696 1637
1697 1638 __bool__ = __nonzero__
1698 1639
1699 1640 def linkrev(self):
1700 1641 # linked to self._changectx no matter if file is modified or not
1701 1642 return self.rev()
1702 1643
1703 1644 def parents(self):
1704 1645 '''return parent filectxs, following copies if necessary'''
1705 1646 def filenode(ctx, path):
1706 1647 return ctx._manifest.get(path, nullid)
1707 1648
1708 1649 path = self._path
1709 1650 fl = self._filelog
1710 1651 pcl = self._changectx._parents
1711 1652 renamed = self.renamed()
1712 1653
1713 1654 if renamed:
1714 1655 pl = [renamed + (None,)]
1715 1656 else:
1716 1657 pl = [(path, filenode(pcl[0], path), fl)]
1717 1658
1718 1659 for pc in pcl[1:]:
1719 1660 pl.append((path, filenode(pc, path), fl))
1720 1661
1721 1662 return [self._parentfilectx(p, fileid=n, filelog=l)
1722 1663 for p, n, l in pl if n != nullid]
1723 1664
1724 1665 def children(self):
1725 1666 return []
1726 1667
1727 1668 class workingfilectx(committablefilectx):
1728 1669 """A workingfilectx object makes access to data related to a particular
1729 1670 file in the working directory convenient."""
1730 1671 def __init__(self, repo, path, filelog=None, workingctx=None):
1731 1672 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1732 1673
1733 1674 @propertycache
1734 1675 def _changectx(self):
1735 1676 return workingctx(self._repo)
1736 1677
1737 1678 def data(self):
1738 1679 return self._repo.wread(self._path)
1739 1680 def renamed(self):
1740 1681 rp = self._repo.dirstate.copied(self._path)
1741 1682 if not rp:
1742 1683 return None
1743 1684 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1744 1685
1745 1686 def size(self):
1746 1687 return self._repo.wvfs.lstat(self._path).st_size
1747 1688 def date(self):
1748 1689 t, tz = self._changectx.date()
1749 1690 try:
1750 1691 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1751 1692 except OSError as err:
1752 1693 if err.errno != errno.ENOENT:
1753 1694 raise
1754 1695 return (t, tz)
1755 1696
1756 1697 def exists(self):
1757 1698 return self._repo.wvfs.exists(self._path)
1758 1699
1759 1700 def lexists(self):
1760 1701 return self._repo.wvfs.lexists(self._path)
1761 1702
1762 1703 def audit(self):
1763 1704 return self._repo.wvfs.audit(self._path)
1764 1705
1765 1706 def cmp(self, fctx):
1766 1707 """compare with other file context
1767 1708
1768 1709 returns True if different than fctx.
1769 1710 """
1770 1711 # fctx should be a filectx (not a workingfilectx)
1771 1712 # invert comparison to reuse the same code path
1772 1713 return fctx.cmp(self)
1773 1714
1774 1715 def remove(self, ignoremissing=False):
1775 1716 """wraps unlink for a repo's working directory"""
1776 1717 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1777 1718
1778 1719 def write(self, data, flags, backgroundclose=False, **kwargs):
1779 1720 """wraps repo.wwrite"""
1780 1721 self._repo.wwrite(self._path, data, flags,
1781 1722 backgroundclose=backgroundclose,
1782 1723 **kwargs)
1783 1724
1784 1725 def markcopied(self, src):
1785 1726 """marks this file a copy of `src`"""
1786 1727 if self._repo.dirstate[self._path] in "nma":
1787 1728 self._repo.dirstate.copy(src, self._path)
1788 1729
1789 1730 def clearunknown(self):
1790 1731 """Removes conflicting items in the working directory so that
1791 1732 ``write()`` can be called successfully.
1792 1733 """
1793 1734 wvfs = self._repo.wvfs
1794 1735 f = self._path
1795 1736 wvfs.audit(f)
1796 1737 if wvfs.isdir(f) and not wvfs.islink(f):
1797 1738 wvfs.rmtree(f, forcibly=True)
1798 1739 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1799 1740 for p in reversed(list(util.finddirs(f))):
1800 1741 if wvfs.isfileorlink(p):
1801 1742 wvfs.unlink(p)
1802 1743 break
1803 1744
1804 1745 def setflags(self, l, x):
1805 1746 self._repo.wvfs.setflags(self._path, l, x)
1806 1747
1807 1748 class overlayworkingctx(committablectx):
1808 1749 """Wraps another mutable context with a write-back cache that can be
1809 1750 converted into a commit context.
1810 1751
1811 1752 self._cache[path] maps to a dict with keys: {
1812 1753 'exists': bool?
1813 1754 'date': date?
1814 1755 'data': str?
1815 1756 'flags': str?
1816 1757 'copied': str? (path or None)
1817 1758 }
1818 1759 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1819 1760 is `False`, the file was deleted.
1820 1761 """
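# A minimal sketch of one cache entry, assuming a hypothetical path and
# contents (only the key names come from the docstring above):
#
#   self._cache['foo.txt'] = {
#       'exists': True,                    # still present in this overlay
#       'data': 'new contents\n',          # bytes passed to write()
#       'date': dateutil.makedate(),       # filled in by _markdirty()
#       'flags': 'x',                      # '', 'l', 'x' or 'lx'
#       'copied': None,                    # or the copy source path
#   }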
1821 1762
1822 1763 def __init__(self, repo):
1823 1764 super(overlayworkingctx, self).__init__(repo)
1824 1765 self.clean()
1825 1766
1826 1767 def setbase(self, wrappedctx):
1827 1768 self._wrappedctx = wrappedctx
1828 1769 self._parents = [wrappedctx]
1829 1770 # Drop old manifest cache as it is now out of date.
1830 1771 # This is necessary when, e.g., rebasing several nodes with one
1831 1772 # ``overlayworkingctx`` (e.g. with --collapse).
1832 1773 util.clearcachedproperty(self, '_manifest')
1833 1774
1834 1775 def data(self, path):
1835 1776 if self.isdirty(path):
1836 1777 if self._cache[path]['exists']:
1837 1778 if self._cache[path]['data']:
1838 1779 return self._cache[path]['data']
1839 1780 else:
1840 1781 # Must fall back here, too, because we only set flags.
1841 1782 return self._wrappedctx[path].data()
1842 1783 else:
1843 1784 raise error.ProgrammingError("No such file or directory: %s" %
1844 1785 path)
1845 1786 else:
1846 1787 return self._wrappedctx[path].data()
1847 1788
1848 1789 @propertycache
1849 1790 def _manifest(self):
1850 1791 parents = self.parents()
1851 1792 man = parents[0].manifest().copy()
1852 1793
1853 1794 flag = self._flagfunc
1854 1795 for path in self.added():
1855 1796 man[path] = addednodeid
1856 1797 man.setflag(path, flag(path))
1857 1798 for path in self.modified():
1858 1799 man[path] = modifiednodeid
1859 1800 man.setflag(path, flag(path))
1860 1801 for path in self.removed():
1861 1802 del man[path]
1862 1803 return man
1863 1804
1864 1805 @propertycache
1865 1806 def _flagfunc(self):
1866 1807 def f(path):
1867 1808 return self._cache[path]['flags']
1868 1809 return f
1869 1810
1870 1811 def files(self):
1871 1812 return sorted(self.added() + self.modified() + self.removed())
1872 1813
1873 1814 def modified(self):
1874 1815 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1875 1816 self._existsinparent(f)]
1876 1817
1877 1818 def added(self):
1878 1819 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1879 1820 not self._existsinparent(f)]
1880 1821
1881 1822 def removed(self):
1882 1823 return [f for f in self._cache.keys() if
1883 1824 not self._cache[f]['exists'] and self._existsinparent(f)]
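# Taken together: a dirty path that exists both in this overlay and in the
# wrapped parent is reported by modified(), one that exists only here by
# added(), and one deleted here but present in the parent by removed().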
1884 1825
1885 1826 def isinmemory(self):
1886 1827 return True
1887 1828
1888 1829 def filedate(self, path):
1889 1830 if self.isdirty(path):
1890 1831 return self._cache[path]['date']
1891 1832 else:
1892 1833 return self._wrappedctx[path].date()
1893 1834
1894 1835 def markcopied(self, path, origin):
1895 1836 if self.isdirty(path):
1896 1837 self._cache[path]['copied'] = origin
1897 1838 else:
1898 1839 raise error.ProgrammingError('markcopied() called on clean context')
1899 1840
1900 1841 def copydata(self, path):
1901 1842 if self.isdirty(path):
1902 1843 return self._cache[path]['copied']
1903 1844 else:
1904 1845 raise error.ProgrammingError('copydata() called on clean context')
1905 1846
1906 1847 def flags(self, path):
1907 1848 if self.isdirty(path):
1908 1849 if self._cache[path]['exists']:
1909 1850 return self._cache[path]['flags']
1910 1851 else:
1911 1852 raise error.ProgrammingError("No such file or directory: %s" %
1912 1853 path)
1913 1854 else:
1914 1855 return self._wrappedctx[path].flags()
1915 1856
1916 1857 def _existsinparent(self, path):
1917 1858 try:
1918 1859 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1919 1860 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1920 1861 # with an ``exists()`` function.
1921 1862 self._wrappedctx[path]
1922 1863 return True
1923 1864 except error.ManifestLookupError:
1924 1865 return False
1925 1866
1926 1867 def _auditconflicts(self, path):
1927 1868 """Replicates conflict checks done by wvfs.write().
1928 1869
1929 1870 Since we never write to the filesystem and never call `applyupdates` in
1930 1871 IMM, we'll never check that a path is actually writable -- e.g., because
1931 1872 it adds `a/foo`, but `a` is actually a file in the other commit.
1932 1873 """
1933 1874 def fail(path, component):
1934 1875 # p1() is the base and we're receiving "writes" for p2()'s
1935 1876 # files.
1936 1877 if 'l' in self.p1()[component].flags():
1937 1878 raise error.Abort("error: %s conflicts with symlink %s "
1938 1879 "in %s." % (path, component,
1939 1880 self.p1().rev()))
1940 1881 else:
1941 1882 raise error.Abort("error: '%s' conflicts with file '%s' in "
1942 1883 "%s." % (path, component,
1943 1884 self.p1().rev()))
1944 1885
1945 1886 # Test that each new directory to be created to write this path from p2
1946 1887 # is not a file in p1.
1947 1888 components = path.split('/')
1948 1889 for i in pycompat.xrange(len(components)):
1949 1890 component = "/".join(components[0:i])
1950 1891 if component in self.p1():
1951 1892 fail(path, component)
1952 1893
1953 1894 # Test the other direction -- that this path from p2 isn't a directory
1954 1895 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1955 1896 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1956 1897 matches = self.p1().manifest().matches(match)
1957 1898 if len(matches) > 0:
1958 1899 if len(matches) == 1 and matches.keys()[0] == path:
1959 1900 return
1960 1901 raise error.Abort("error: file '%s' cannot be written because "
1961 1902 " '%s/' is a folder in %s (containing %d "
1962 1903 "entries: %s)"
1963 1904 % (path, path, self.p1(), len(matches),
1964 1905 ', '.join(matches.keys())))
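# A minimal sketch of the two checks above, assuming a hypothetical path
# 'a/b/c' written from p2:
#
#   components = 'a/b/c'.split('/')          # ['a', 'b', 'c']
#   # first loop: abort if '', 'a' or 'a/b' is a file (or symlink) in p1
#   # matches check: abort if p1 contains any file under 'a/b/c/'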
1965 1906
1966 1907 def write(self, path, data, flags='', **kwargs):
1967 1908 if data is None:
1968 1909 raise error.ProgrammingError("data must be non-None")
1969 1910 self._auditconflicts(path)
1970 1911 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1971 1912 flags=flags)
1972 1913
1973 1914 def setflags(self, path, l, x):
1974 1915 self._markdirty(path, exists=True, date=dateutil.makedate(),
1975 1916 flags=(l and 'l' or '') + (x and 'x' or ''))
1976 1917
1977 1918 def remove(self, path):
1978 1919 self._markdirty(path, exists=False)
1979 1920
1980 1921 def exists(self, path):
1981 1922 """exists behaves like `lexists`, but needs to follow symlinks and
1982 1923 return False if they are broken.
1983 1924 """
1984 1925 if self.isdirty(path):
1985 1926 # If this path exists and is a symlink, "follow" it by calling
1986 1927 # exists on the destination path.
1987 1928 if (self._cache[path]['exists'] and
1988 1929 'l' in self._cache[path]['flags']):
1989 1930 return self.exists(self._cache[path]['data'].strip())
1990 1931 else:
1991 1932 return self._cache[path]['exists']
1992 1933
1993 1934 return self._existsinparent(path)
1994 1935
1995 1936 def lexists(self, path):
1996 1937 """lexists returns True if the path exists"""
1997 1938 if self.isdirty(path):
1998 1939 return self._cache[path]['exists']
1999 1940
2000 1941 return self._existsinparent(path)
2001 1942
2002 1943 def size(self, path):
2003 1944 if self.isdirty(path):
2004 1945 if self._cache[path]['exists']:
2005 1946 return len(self._cache[path]['data'])
2006 1947 else:
2007 1948 raise error.ProgrammingError("No such file or directory: %s" %
2008 1949 path)
2009 1950 return self._wrappedctx[path].size()
2010 1951
2011 1952 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2012 1953 user=None, editor=None):
2013 1954 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2014 1955 committed.
2015 1956
2016 1957 ``text`` is the commit message.
2017 1958 ``parents`` (optional) are rev numbers.
2018 1959 """
2019 1960 # Default parents to the wrapped contexts' if not passed.
2020 1961 if parents is None:
2021 1962 parents = self._wrappedctx.parents()
2022 1963 if len(parents) == 1:
2023 1964 parents = (parents[0], None)
2024 1965
2025 1966 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2026 1967 if parents[1] is None:
2027 1968 parents = (self._repo[parents[0]], None)
2028 1969 else:
2029 1970 parents = (self._repo[parents[0]], self._repo[parents[1]])
2030 1971
2031 1972 files = self._cache.keys()
2032 1973 def getfile(repo, memctx, path):
2033 1974 if self._cache[path]['exists']:
2034 1975 return memfilectx(repo, memctx, path,
2035 1976 self._cache[path]['data'],
2036 1977 'l' in self._cache[path]['flags'],
2037 1978 'x' in self._cache[path]['flags'],
2038 1979 self._cache[path]['copied'])
2039 1980 else:
2040 1981 # Returning None, but including the path in `files`, is
2041 1982 # necessary for memctx to register a deletion.
2042 1983 return None
2043 1984 return memctx(self._repo, parents, text, files, getfile, date=date,
2044 1985 extra=extra, user=user, branch=branch, editor=editor)
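# A minimal usage sketch (commented out; ``repo`` and the file contents are
# assumptions, not part of this module): rebase-style code typically layers
# an overlay on a base commit, "writes" the merged results into it, and then
# converts the overlay into a real commit via tomemctx().
#
#   wctx = overlayworkingctx(repo)
#   wctx.setbase(repo['.'])
#   wctx.write('foo.txt', 'merged contents\n')
#   if not wctx.isempty():
#       mctx = wctx.tomemctx('rewrite foo.txt', user='someone')
#       newnode = mctx.commit()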
2045 1986
2046 1987 def isdirty(self, path):
2047 1988 return path in self._cache
2048 1989
2049 1990 def isempty(self):
2050 1991 # We need to discard any keys that are actually clean before the empty
2051 1992 # commit check.
2052 1993 self._compact()
2053 1994 return len(self._cache) == 0
2054 1995
2055 1996 def clean(self):
2056 1997 self._cache = {}
2057 1998
2058 1999 def _compact(self):
2059 2000 """Removes keys from the cache that are actually clean, by comparing
2060 2001 them with the underlying context.
2061 2002
2062 2003 This can occur during the merge process, e.g. by passing --tool :local
2063 2004 to resolve a conflict.
2064 2005 """
2065 2006 keys = []
2066 2007 for path in self._cache.keys():
2067 2008 cache = self._cache[path]
2068 2009 try:
2069 2010 underlying = self._wrappedctx[path]
2070 2011 if (underlying.data() == cache['data'] and
2071 2012 underlying.flags() == cache['flags']):
2072 2013 keys.append(path)
2073 2014 except error.ManifestLookupError:
2074 2015 # Path not in the underlying manifest (created).
2075 2016 continue
2076 2017
2077 2018 for path in keys:
2078 2019 del self._cache[path]
2079 2020 return keys
2080 2021
2081 2022 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2082 2023 self._cache[path] = {
2083 2024 'exists': exists,
2084 2025 'data': data,
2085 2026 'date': date,
2086 2027 'flags': flags,
2087 2028 'copied': None,
2088 2029 }
2089 2030
2090 2031 def filectx(self, path, filelog=None):
2091 2032 return overlayworkingfilectx(self._repo, path, parent=self,
2092 2033 filelog=filelog)
2093 2034
2094 2035 class overlayworkingfilectx(committablefilectx):
2095 2036 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2096 2037 cache, which can be flushed through later by calling ``flush()``."""
2097 2038
2098 2039 def __init__(self, repo, path, filelog=None, parent=None):
2099 2040 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2100 2041 parent)
2101 2042 self._repo = repo
2102 2043 self._parent = parent
2103 2044 self._path = path
2104 2045
2105 2046 def cmp(self, fctx):
2106 2047 return self.data() != fctx.data()
2107 2048
2108 2049 def changectx(self):
2109 2050 return self._parent
2110 2051
2111 2052 def data(self):
2112 2053 return self._parent.data(self._path)
2113 2054
2114 2055 def date(self):
2115 2056 return self._parent.filedate(self._path)
2116 2057
2117 2058 def exists(self):
2118 2059 return self.lexists()
2119 2060
2120 2061 def lexists(self):
2121 2062 return self._parent.exists(self._path)
2122 2063
2123 2064 def renamed(self):
2124 2065 path = self._parent.copydata(self._path)
2125 2066 if not path:
2126 2067 return None
2127 2068 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2128 2069
2129 2070 def size(self):
2130 2071 return self._parent.size(self._path)
2131 2072
2132 2073 def markcopied(self, origin):
2133 2074 self._parent.markcopied(self._path, origin)
2134 2075
2135 2076 def audit(self):
2136 2077 pass
2137 2078
2138 2079 def flags(self):
2139 2080 return self._parent.flags(self._path)
2140 2081
2141 2082 def setflags(self, islink, isexec):
2142 2083 return self._parent.setflags(self._path, islink, isexec)
2143 2084
2144 2085 def write(self, data, flags, backgroundclose=False, **kwargs):
2145 2086 return self._parent.write(self._path, data, flags, **kwargs)
2146 2087
2147 2088 def remove(self, ignoremissing=False):
2148 2089 return self._parent.remove(self._path)
2149 2090
2150 2091 def clearunknown(self):
2151 2092 pass
2152 2093
2153 2094 class workingcommitctx(workingctx):
2154 2095 """A workingcommitctx object makes access to data related to
2155 2096 the revision being committed convenient.
2156 2097
2157 2098 This hides changes in the working directory, if they aren't
2158 2099 committed in this context.
2159 2100 """
2160 2101 def __init__(self, repo, changes,
2161 2102 text="", user=None, date=None, extra=None):
2162 2103 super(workingctx, self).__init__(repo, text, user, date, extra,
2163 2104 changes)
2164 2105
2165 2106 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2166 2107 """Return matched files only in ``self._status``
2167 2108
2168 2109 Uncommitted files appear "clean" via this context, even if
2169 2110 they aren't actually so in the working directory.
2170 2111 """
2171 2112 if clean:
2172 2113 clean = [f for f in self._manifest if f not in self._changedset]
2173 2114 else:
2174 2115 clean = []
2175 2116 return scmutil.status([f for f in self._status.modified if match(f)],
2176 2117 [f for f in self._status.added if match(f)],
2177 2118 [f for f in self._status.removed if match(f)],
2178 2119 [], [], [], clean)
2179 2120
2180 2121 @propertycache
2181 2122 def _changedset(self):
2182 2123 """Return the set of files changed in this context
2183 2124 """
2184 2125 changed = set(self._status.modified)
2185 2126 changed.update(self._status.added)
2186 2127 changed.update(self._status.removed)
2187 2128 return changed
2188 2129
2189 2130 def makecachingfilectxfn(func):
2190 2131 """Create a filectxfn that caches based on the path.
2191 2132
2192 2133 We can't use util.cachefunc because it uses all arguments as the cache
2193 2134 key and this creates a cycle since the arguments include the repo and
2194 2135 memctx.
2195 2136 """
2196 2137 cache = {}
2197 2138
2198 2139 def getfilectx(repo, memctx, path):
2199 2140 if path not in cache:
2200 2141 cache[path] = func(repo, memctx, path)
2201 2142 return cache[path]
2202 2143
2203 2144 return getfilectx
2204 2145
2205 2146 def memfilefromctx(ctx):
2206 2147 """Given a context return a memfilectx for ctx[path]
2207 2148
2208 2149 This is a convenience method for building a memctx based on another
2209 2150 context.
2210 2151 """
2211 2152 def getfilectx(repo, memctx, path):
2212 2153 fctx = ctx[path]
2213 2154 # this is weird but apparently we only keep track of one parent
2214 2155 # (why not only store that instead of a tuple?)
2215 2156 copied = fctx.renamed()
2216 2157 if copied:
2217 2158 copied = copied[0]
2218 2159 return memfilectx(repo, memctx, path, fctx.data(),
2219 2160 islink=fctx.islink(), isexec=fctx.isexec(),
2220 2161 copied=copied)
2221 2162
2222 2163 return getfilectx
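# A minimal sketch (commented out; ``repo`` and ``ctx`` are assumed to come
# from elsewhere): an in-memory copy of an existing changeset can be built by
# feeding the wrapper above to memctx as its filectxfn.
#
#   getfilectx = memfilefromctx(ctx)
#   mctx = memctx(repo, (ctx.p1().node(), None), ctx.description(),
#                 ctx.files(), getfilectx, user=ctx.user(), date=ctx.date())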
2223 2164
2224 2165 def memfilefrompatch(patchstore):
2225 2166 """Given a patch (e.g. patchstore object) return a memfilectx
2226 2167
2227 2168 This is a convenience method for building a memctx based on a patchstore.
2228 2169 """
2229 2170 def getfilectx(repo, memctx, path):
2230 2171 data, mode, copied = patchstore.getfile(path)
2231 2172 if data is None:
2232 2173 return None
2233 2174 islink, isexec = mode
2234 2175 return memfilectx(repo, memctx, path, data, islink=islink,
2235 2176 isexec=isexec, copied=copied)
2236 2177
2237 2178 return getfilectx
2238 2179
2239 2180 class memctx(committablectx):
2240 2181 """Use memctx to perform in-memory commits via localrepo.commitctx().
2241 2182
2242 2183 Revision information is supplied at initialization time, while
2243 2184 related file data is made available through a callback
2244 2185 mechanism. 'repo' is the current localrepo, 'parents' is a
2245 2186 sequence of two parent revision identifiers (pass None for every
2246 2187 missing parent), 'text' is the commit message and 'files' lists
2247 2188 names of files touched by the revision (normalized and relative to
2248 2189 repository root).
2249 2190
2250 2191 filectxfn(repo, memctx, path) is a callable receiving the
2251 2192 repository, the current memctx object and the normalized path of
2252 2193 requested file, relative to repository root. It is fired by the
2253 2194 commit function for every file in 'files', but the call order is
2254 2195 undefined. If the file is available in the revision being
2255 2196 committed (updated or added), filectxfn returns a memfilectx
2256 2197 object. If the file was removed, filectxfn returns None for recent
2257 2198 Mercurial. Moved files are represented by marking the source file
2258 2199 removed and the new file added with copy information (see
2259 2200 memfilectx).
2260 2201
2261 2202 user receives the committer name and defaults to current
2262 2203 repository username, date is the commit date in any format
2263 2204 supported by dateutil.parsedate() and defaults to current date, extra
2264 2205 is a dictionary of metadata or is left empty.
2265 2206 """
2266 2207
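# A minimal filectxfn sketch, assuming hypothetical paths and contents:
# return a memfilectx for files present in the revision being committed and
# None for files that should be recorded as removed.
#
#   def filectxfn(repo, memctx, path):
#       if path == 'removed.txt':
#           return None
#       return memfilectx(repo, memctx, path, 'contents of %s\n' % path)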
2267 2208 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2268 2209 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2269 2210 # this field to determine what to do in filectxfn.
2270 2211 _returnnoneformissingfiles = True
2271 2212
2272 2213 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2273 2214 date=None, extra=None, branch=None, editor=False):
2274 2215 super(memctx, self).__init__(repo, text, user, date, extra)
2275 2216 self._rev = None
2276 2217 self._node = None
2277 2218 parents = [(p or nullid) for p in parents]
2278 2219 p1, p2 = parents
2279 2220 self._parents = [self._repo[p] for p in (p1, p2)]
2280 2221 files = sorted(set(files))
2281 2222 self._files = files
2282 2223 if branch is not None:
2283 2224 self._extra['branch'] = encoding.fromlocal(branch)
2284 2225 self.substate = {}
2285 2226
2286 2227 if isinstance(filectxfn, patch.filestore):
2287 2228 filectxfn = memfilefrompatch(filectxfn)
2288 2229 elif not callable(filectxfn):
2289 2230 # if store is not callable, wrap it in a function
2290 2231 filectxfn = memfilefromctx(filectxfn)
2291 2232
2292 2233 # memoizing increases performance for e.g. vcs convert scenarios.
2293 2234 self._filectxfn = makecachingfilectxfn(filectxfn)
2294 2235
2295 2236 if editor:
2296 2237 self._text = editor(self._repo, self, [])
2297 2238 self._repo.savecommitmessage(self._text)
2298 2239
2299 2240 def filectx(self, path, filelog=None):
2300 2241 """get a file context from the working directory
2301 2242
2302 2243 Returns None if file doesn't exist and should be removed."""
2303 2244 return self._filectxfn(self._repo, self, path)
2304 2245
2305 2246 def commit(self):
2306 2247 """commit context to the repo"""
2307 2248 return self._repo.commitctx(self)
2308 2249
2309 2250 @propertycache
2310 2251 def _manifest(self):
2311 2252 """generate a manifest based on the return values of filectxfn"""
2312 2253
2313 2254 # keep this simple for now; just worry about p1
2314 2255 pctx = self._parents[0]
2315 2256 man = pctx.manifest().copy()
2316 2257
2317 2258 for f in self._status.modified:
2318 2259 p1node = nullid
2319 2260 p2node = nullid
2320 2261 p = pctx[f].parents() # if file isn't in pctx, check p2?
2321 2262 if len(p) > 0:
2322 2263 p1node = p[0].filenode()
2323 2264 if len(p) > 1:
2324 2265 p2node = p[1].filenode()
2325 2266 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2326 2267
2327 2268 for f in self._status.added:
2328 2269 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2329 2270
2330 2271 for f in self._status.removed:
2331 2272 if f in man:
2332 2273 del man[f]
2333 2274
2334 2275 return man
2335 2276
2336 2277 @propertycache
2337 2278 def _status(self):
2338 2279 """Calculate exact status from ``files`` specified at construction
2339 2280 """
2340 2281 man1 = self.p1().manifest()
2341 2282 p2 = self._parents[1]
2342 2283 # "1 < len(self._parents)" can't be used for checking
2343 2284 # existence of the 2nd parent, because "memctx._parents" is
2344 2285 # explicitly initialized with a list whose length is 2.
2345 2286 if p2.node() != nullid:
2346 2287 man2 = p2.manifest()
2347 2288 managing = lambda f: f in man1 or f in man2
2348 2289 else:
2349 2290 managing = lambda f: f in man1
2350 2291
2351 2292 modified, added, removed = [], [], []
2352 2293 for f in self._files:
2353 2294 if not managing(f):
2354 2295 added.append(f)
2355 2296 elif self[f]:
2356 2297 modified.append(f)
2357 2298 else:
2358 2299 removed.append(f)
2359 2300
2360 2301 return scmutil.status(modified, added, removed, [], [], [], [])
2361 2302
2362 2303 class memfilectx(committablefilectx):
2363 2304 """memfilectx represents an in-memory file to commit.
2364 2305
2365 2306 See memctx and committablefilectx for more details.
2366 2307 """
2367 2308 def __init__(self, repo, changectx, path, data, islink=False,
2368 2309 isexec=False, copied=None):
2369 2310 """
2370 2311 path is the normalized file path relative to repository root.
2371 2312 data is the file content as a string.
2372 2313 islink is True if the file is a symbolic link.
2373 2314 isexec is True if the file is executable.
2374 2315 copied is the source file path if current file was copied in the
2375 2316 revision being committed, or None."""
2376 2317 super(memfilectx, self).__init__(repo, path, None, changectx)
2377 2318 self._data = data
2378 2319 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2379 2320 self._copied = None
2380 2321 if copied:
2381 2322 self._copied = (copied, nullid)
2382 2323
2383 2324 def data(self):
2384 2325 return self._data
2385 2326
2386 2327 def remove(self, ignoremissing=False):
2387 2328 """wraps unlink for a repo's working directory"""
2388 2329 # need to figure out what to do here
2389 2330 del self._changectx[self._path]
2390 2331
2391 2332 def write(self, data, flags, **kwargs):
2392 2333 """wraps repo.wwrite"""
2393 2334 self._data = data
2394 2335
2395 2336 class overlayfilectx(committablefilectx):
2396 2337 """Like memfilectx but take an original filectx and optional parameters to
2397 2338 override parts of it. This is useful when fctx.data() is expensive (i.e.
2398 2339 flag processor is expensive) and raw data, flags, and filenode could be
2399 2340 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2400 2341 """
2401 2342
2402 2343 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2403 2344 copied=None, ctx=None):
2404 2345 """originalfctx: filecontext to duplicate
2405 2346
2406 2347 datafunc: None or a function to override data (file content). It is a
2407 2348 function so it can be evaluated lazily. path, flags, copied, ctx: None or an overridden value
2408 2349
2409 2350 copied could be (path, rev), or False. copied could also be just path,
2410 2351 and will be converted to (path, nullid). This simplifies some callers.
2411 2352 """
2412 2353
2413 2354 if path is None:
2414 2355 path = originalfctx.path()
2415 2356 if ctx is None:
2416 2357 ctx = originalfctx.changectx()
2417 2358 ctxmatch = lambda: True
2418 2359 else:
2419 2360 ctxmatch = lambda: ctx == originalfctx.changectx()
2420 2361
2421 2362 repo = originalfctx.repo()
2422 2363 flog = originalfctx.filelog()
2423 2364 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2424 2365
2425 2366 if copied is None:
2426 2367 copied = originalfctx.renamed()
2427 2368 copiedmatch = lambda: True
2428 2369 else:
2429 2370 if copied and not isinstance(copied, tuple):
2430 2371 # repo._filecommit will recalculate copyrev so nullid is okay
2431 2372 copied = (copied, nullid)
2432 2373 copiedmatch = lambda: copied == originalfctx.renamed()
2433 2374
2434 2375 # When data, copied (could affect data), ctx (could affect filelog
2435 2376 # parents) are not overridden, rawdata, rawflags, and filenode may be
2436 2377 # reused (repo._filecommit should double check filelog parents).
2437 2378 #
2438 2379 # path, flags are not hashed in filelog (but in manifestlog) so they do
2439 2380 # not affect reusability here.
2440 2381 #
2441 2382 # If ctx or copied is overridden to the same value as in originalfctx,
2442 2383 # it is still considered reusable. originalfctx.renamed() may be a bit
2443 2384 # expensive so it's not called unless necessary. Assuming datafunc is
2444 2385 # always expensive, do not call it for this "reusable" test.
2445 2386 reusable = datafunc is None and ctxmatch() and copiedmatch()
2446 2387
2447 2388 if datafunc is None:
2448 2389 datafunc = originalfctx.data
2449 2390 if flags is None:
2450 2391 flags = originalfctx.flags()
2451 2392
2452 2393 self._datafunc = datafunc
2453 2394 self._flags = flags
2454 2395 self._copied = copied
2455 2396
2456 2397 if reusable:
2457 2398 # copy extra fields from originalfctx
2458 2399 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2459 2400 for attr_ in attrs:
2460 2401 if util.safehasattr(originalfctx, attr_):
2461 2402 setattr(self, attr_, getattr(originalfctx, attr_))
2462 2403
2463 2404 def data(self):
2464 2405 return self._datafunc()
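# A minimal sketch (commented out; ``fctx`` is an assumed filectx from some
# existing revision): a mode-only change can reuse the original raw data and
# filenode by overriding nothing but the flags.
#
#   execfctx = overlayfilectx(fctx, flags='x')   # same content, exec bit set
#   data = execfctx.data()                       # lazily calls fctx.data()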
2465 2406
2466 2407 class metadataonlyctx(committablectx):
2467 2408 """Like memctx but it's reusing the manifest of different commit.
2468 2409 Intended to be used by lightweight operations that are creating
2469 2410 metadata-only changes.
2470 2411
2471 2412 Revision information is supplied at initialization time. 'repo' is the
2472 2413 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2473 2414 'parents' is a sequence of two parent revision identifiers (pass None for
2474 2415 every missing parent), 'text' is the commit message.
2475 2416
2476 2417 user receives the committer name and defaults to current repository
2477 2418 username, date is the commit date in any format supported by
2478 2419 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2479 2420 metadata or is left empty.
2480 2421 """
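# A minimal usage sketch (commented out; ``repo`` is an assumption): rewriting
# only the commit message of the working directory parent while reusing its
# manifest, the typical metadata-only amend case.
#
#   old = repo['.']
#   mdctx = metadataonlyctx(repo, old, text='better commit message\n',
#                           user=old.user(), date=old.date())
#   newnode = mdctx.commit()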
2481 2422 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2482 2423 date=None, extra=None, editor=False):
2483 2424 if text is None:
2484 2425 text = originalctx.description()
2485 2426 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2486 2427 self._rev = None
2487 2428 self._node = None
2488 2429 self._originalctx = originalctx
2489 2430 self._manifestnode = originalctx.manifestnode()
2490 2431 if parents is None:
2491 2432 parents = originalctx.parents()
2492 2433 else:
2493 2434 parents = [repo[p] for p in parents if p is not None]
2494 2435 parents = parents[:]
2495 2436 while len(parents) < 2:
2496 2437 parents.append(repo[nullid])
2497 2438 p1, p2 = self._parents = parents
2498 2439
2499 2440 # sanity check to ensure that the reused manifest parents are
2500 2441 # manifests of our commit parents
2501 2442 mp1, mp2 = self.manifestctx().parents
2502 2443 if p1 != nullid and p1.manifestnode() != mp1:
2503 2444 raise RuntimeError('can\'t reuse the manifest: '
2504 2445 'its p1 doesn\'t match the new ctx p1')
2505 2446 if p2 != nullid and p2.manifestnode() != mp2:
2506 2447 raise RuntimeError('can\'t reuse the manifest: '
2507 2448 'its p2 doesn\'t match the new ctx p2')
2508 2449
2509 2450 self._files = originalctx.files()
2510 2451 self.substate = {}
2511 2452
2512 2453 if editor:
2513 2454 self._text = editor(self._repo, self, [])
2514 2455 self._repo.savecommitmessage(self._text)
2515 2456
2516 2457 def manifestnode(self):
2517 2458 return self._manifestnode
2518 2459
2519 2460 @property
2520 2461 def _manifestctx(self):
2521 2462 return self._repo.manifestlog[self._manifestnode]
2522 2463
2523 2464 def filectx(self, path, filelog=None):
2524 2465 return self._originalctx.filectx(path, filelog=filelog)
2525 2466
2526 2467 def commit(self):
2527 2468 """commit context to the repo"""
2528 2469 return self._repo.commitctx(self)
2529 2470
2530 2471 @property
2531 2472 def _manifest(self):
2532 2473 return self._originalctx.manifest()
2533 2474
2534 2475 @propertycache
2535 2476 def _status(self):
2536 2477 """Calculate exact status from ``files`` specified in the ``origctx``
2537 2478 and the parents' manifests.
2538 2479 """
2539 2480 man1 = self.p1().manifest()
2540 2481 p2 = self._parents[1]
2541 2482 # "1 < len(self._parents)" can't be used for checking
2542 2483 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2543 2484 # explicitly initialized with a list whose length is 2.
2544 2485 if p2.node() != nullid:
2545 2486 man2 = p2.manifest()
2546 2487 managing = lambda f: f in man1 or f in man2
2547 2488 else:
2548 2489 managing = lambda f: f in man1
2549 2490
2550 2491 modified, added, removed = [], [], []
2551 2492 for f in self._files:
2552 2493 if not managing(f):
2553 2494 added.append(f)
2554 2495 elif f in self:
2555 2496 modified.append(f)
2556 2497 else:
2557 2498 removed.append(f)
2558 2499
2559 2500 return scmutil.status(modified, added, removed, [], [], [], [])
2560 2501
2561 2502 class arbitraryfilectx(object):
2562 2503 """Allows you to use filectx-like functions on a file in an arbitrary
2563 2504 location on disk, possibly not in the working directory.
2564 2505 """
2565 2506 def __init__(self, path, repo=None):
2566 2507 # Repo is optional because contrib/simplemerge uses this class.
2567 2508 self._repo = repo
2568 2509 self._path = path
2569 2510
2570 2511 def cmp(self, fctx):
2571 2512 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2572 2513 # path if either side is a symlink.
2573 2514 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2574 2515 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2575 2516 # Add a fast-path for merge if both sides are disk-backed.
2576 2517 # Note that filecmp uses the opposite return values (True if same)
2577 2518 # from our cmp functions (True if different).
2578 2519 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2579 2520 return self.data() != fctx.data()
2580 2521
2581 2522 def path(self):
2582 2523 return self._path
2583 2524
2584 2525 def flags(self):
2585 2526 return ''
2586 2527
2587 2528 def data(self):
2588 2529 return util.readfile(self._path)
2589 2530
2590 2531 def decodeddata(self):
2591 2532 with open(self._path, "rb") as f:
2592 2533 return f.read()
2593 2534
2594 2535 def remove(self):
2595 2536 util.unlink(self._path)
2596 2537
2597 2538 def write(self, data, flags, **kwargs):
2598 2539 assert not flags
2599 2540 with open(self._path, "w") as f:
2600 2541 f.write(data)