context: clarify deprecation warning message...
Martin von Zweigbergk
r37747:6e137da5 default
@@ -1,2597 +1,2598
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirfilenodeids,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29 from . import (
30 30 dagop,
31 31 encoding,
32 32 error,
33 33 fileset,
34 34 match as matchmod,
35 35 obsolete as obsmod,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 repoview,
41 41 revlog,
42 42 scmutil,
43 43 sparse,
44 44 subrepo,
45 45 subrepoutil,
46 46 util,
47 47 )
48 48 from .utils import (
49 49 dateutil,
50 50 stringutil,
51 51 )
52 52
53 53 propertycache = util.propertycache
54 54
55 55 nonascii = re.compile(br'[^\x21-\x7f]').search
56 56
57 57 class basectx(object):
58 58 """A basectx object represents the common logic for its children:
59 59 changectx: read-only context that is already present in the repo,
60 60 workingctx: a context that represents the working directory and can
61 61 be committed,
62 62 memctx: a context that represents changes in-memory and can also
63 63 be committed."""
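# Illustrative note: callers normally obtain these contexts through the
# repository object, e.g. repo['.'] or repo[some_node], rather than
# instantiating the classes directly; changectxdeprecwarn() below documents
# the lookup forms that are being deprecated and their replacements.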
64 64
65 65 def __init__(self, repo):
66 66 self._repo = repo
67 67
68 68 def __bytes__(self):
69 69 return short(self.node())
70 70
71 71 __str__ = encoding.strmethod(__bytes__)
72 72
73 73 def __repr__(self):
74 74 return r"<%s %s>" % (type(self).__name__, str(self))
75 75
76 76 def __eq__(self, other):
77 77 try:
78 78 return type(self) == type(other) and self._rev == other._rev
79 79 except AttributeError:
80 80 return False
81 81
82 82 def __ne__(self, other):
83 83 return not (self == other)
84 84
85 85 def __contains__(self, key):
86 86 return key in self._manifest
87 87
88 88 def __getitem__(self, key):
89 89 return self.filectx(key)
90 90
91 91 def __iter__(self):
92 92 return iter(self._manifest)
93 93
94 94 def _buildstatusmanifest(self, status):
95 95 """Builds a manifest that includes the given status results, if this is
96 96 a working copy context. For non-working copy contexts, it just returns
97 97 the normal manifest."""
98 98 return self.manifest()
99 99
100 100 def _matchstatus(self, other, match):
101 101 """This internal method provides a way for child objects to override the
102 102 match operator.
103 103 """
104 104 return match
105 105
106 106 def _buildstatus(self, other, s, match, listignored, listclean,
107 107 listunknown):
108 108 """build a status with respect to another context"""
109 109 # Load earliest manifest first for caching reasons. More specifically,
110 110 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 111 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 112 # 1000 and cache it so that when you read 1001, we just need to apply a
113 113 # delta to what's in the cache. So that's one full reconstruction + one
114 114 # delta application.
115 115 mf2 = None
116 116 if self.rev() is not None and self.rev() < other.rev():
117 117 mf2 = self._buildstatusmanifest(s)
118 118 mf1 = other._buildstatusmanifest(s)
119 119 if mf2 is None:
120 120 mf2 = self._buildstatusmanifest(s)
121 121
122 122 modified, added = [], []
123 123 removed = []
124 124 clean = []
125 125 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 126 deletedset = set(deleted)
127 127 d = mf1.diff(mf2, match=match, clean=listclean)
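# d maps each differing file name to a pair of (node, flag) tuples, one
# taken from mf1 and one from mf2; when listclean is set, unchanged
# matching files appear with a value of None and are handled below.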
128 128 for fn, value in d.iteritems():
129 129 if fn in deletedset:
130 130 continue
131 131 if value is None:
132 132 clean.append(fn)
133 133 continue
134 134 (node1, flag1), (node2, flag2) = value
135 135 if node1 is None:
136 136 added.append(fn)
137 137 elif node2 is None:
138 138 removed.append(fn)
139 139 elif flag1 != flag2:
140 140 modified.append(fn)
141 141 elif node2 not in wdirfilenodeids:
142 142 # When comparing files between two commits, we save time by
143 143 # not comparing the file contents when the nodeids differ.
144 144 # Note that this means we incorrectly report a reverted change
145 145 # to a file as a modification.
146 146 modified.append(fn)
147 147 elif self[fn].cmp(other[fn]):
148 148 modified.append(fn)
149 149 else:
150 150 clean.append(fn)
151 151
152 152 if removed:
153 153 # need to filter files if they are already reported as removed
154 154 unknown = [fn for fn in unknown if fn not in mf1 and
155 155 (not match or match(fn))]
156 156 ignored = [fn for fn in ignored if fn not in mf1 and
157 157 (not match or match(fn))]
158 158 # if they're deleted, don't report them as removed
159 159 removed = [fn for fn in removed if fn not in deletedset]
160 160
161 161 return scmutil.status(modified, added, removed, deleted, unknown,
162 162 ignored, clean)
163 163
164 164 @propertycache
165 165 def substate(self):
166 166 return subrepoutil.state(self, self._repo.ui)
167 167
168 168 def subrev(self, subpath):
169 169 return self.substate[subpath][1]
170 170
171 171 def rev(self):
172 172 return self._rev
173 173 def node(self):
174 174 return self._node
175 175 def hex(self):
176 176 return hex(self.node())
177 177 def manifest(self):
178 178 return self._manifest
179 179 def manifestctx(self):
180 180 return self._manifestctx
181 181 def repo(self):
182 182 return self._repo
183 183 def phasestr(self):
184 184 return phases.phasenames[self.phase()]
185 185 def mutable(self):
186 186 return self.phase() > phases.public
187 187
188 188 def getfileset(self, expr):
189 189 return fileset.getfileset(self, expr)
190 190
191 191 def obsolete(self):
192 192 """True if the changeset is obsolete"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 194
195 195 def extinct(self):
196 196 """True if the changeset is extinct"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 198
199 199 def orphan(self):
200 200 """True if the changeset is not obsolete but it's ancestor are"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
202 202
203 203 def phasedivergent(self):
204 204 """True if the changeset try to be a successor of a public changeset
205 205
206 206 Only non-public and non-obsolete changesets may be bumped.
207 207 """
208 208 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
209 209
210 210 def contentdivergent(self):
211 211 """Is a successors of a changeset with multiple possible successors set
212 212
213 213 Only non-public and non-obsolete changesets may be divergent.
214 214 """
215 215 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
216 216
217 217 def isunstable(self):
218 218 """True if the changeset is either unstable, bumped or divergent"""
219 219 return self.orphan() or self.phasedivergent() or self.contentdivergent()
220 220
221 221 def instabilities(self):
222 222 """return the list of instabilities affecting this changeset.
223 223
224 224 Instabilities are returned as strings. Possible values are:
225 225 - orphan,
226 226 - phase-divergent,
227 227 - content-divergent.
228 228 """
229 229 instabilities = []
230 230 if self.orphan():
231 231 instabilities.append('orphan')
232 232 if self.phasedivergent():
233 233 instabilities.append('phase-divergent')
234 234 if self.contentdivergent():
235 235 instabilities.append('content-divergent')
236 236 return instabilities
237 237
238 238 def parents(self):
239 239 """return contexts for each parent changeset"""
240 240 return self._parents
241 241
242 242 def p1(self):
243 243 return self._parents[0]
244 244
245 245 def p2(self):
246 246 parents = self._parents
247 247 if len(parents) == 2:
248 248 return parents[1]
249 249 return changectx(self._repo, nullrev)
250 250
251 251 def _fileinfo(self, path):
252 252 if r'_manifest' in self.__dict__:
253 253 try:
254 254 return self._manifest[path], self._manifest.flags(path)
255 255 except KeyError:
256 256 raise error.ManifestLookupError(self._node, path,
257 257 _('not found in manifest'))
258 258 if r'_manifestdelta' in self.__dict__ or path in self.files():
259 259 if path in self._manifestdelta:
260 260 return (self._manifestdelta[path],
261 261 self._manifestdelta.flags(path))
262 262 mfl = self._repo.manifestlog
263 263 try:
264 264 node, flag = mfl[self._changeset.manifest].find(path)
265 265 except KeyError:
266 266 raise error.ManifestLookupError(self._node, path,
267 267 _('not found in manifest'))
268 268
269 269 return node, flag
270 270
271 271 def filenode(self, path):
272 272 return self._fileinfo(path)[0]
273 273
274 274 def flags(self, path):
275 275 try:
276 276 return self._fileinfo(path)[1]
277 277 except error.LookupError:
278 278 return ''
279 279
280 280 def sub(self, path, allowcreate=True):
281 281 '''return a subrepo for the stored revision of path, never wdir()'''
282 282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283 283
284 284 def nullsub(self, path, pctx):
285 285 return subrepo.nullsubrepo(self, path, pctx)
286 286
287 287 def workingsub(self, path):
288 288 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 289 context.
290 290 '''
291 291 return subrepo.subrepo(self, path, allowwdir=True)
292 292
293 293 def match(self, pats=None, include=None, exclude=None, default='glob',
294 294 listsubrepos=False, badfn=None):
295 295 r = self._repo
296 296 return matchmod.match(r.root, r.getcwd(), pats,
297 297 include, exclude, default,
298 298 auditor=r.nofsauditor, ctx=self,
299 299 listsubrepos=listsubrepos, badfn=badfn)
300 300
301 301 def diff(self, ctx2=None, match=None, **opts):
302 302 """Returns a diff generator for the given contexts and matcher"""
303 303 if ctx2 is None:
304 304 ctx2 = self.p1()
305 305 if ctx2 is not None:
306 306 ctx2 = self._repo[ctx2]
307 307 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
308 308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
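# Illustrative usage (sketch): iterating over ctx.diff() yields patch
# chunks as bytes, comparing ctx against its first parent by default;
# keyword options are forwarded to patch.diffopts() above.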
309 309
310 310 def dirs(self):
311 311 return self._manifest.dirs()
312 312
313 313 def hasdir(self, dir):
314 314 return self._manifest.hasdir(dir)
315 315
316 316 def status(self, other=None, match=None, listignored=False,
317 317 listclean=False, listunknown=False, listsubrepos=False):
318 318 """return status of files between two nodes or node and working
319 319 directory.
320 320
321 321 If other is None, compare this node with working directory.
322 322
323 323 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 324 """
325 325
326 326 ctx1 = self
327 327 ctx2 = self._repo[other]
328 328
329 329 # This next code block is, admittedly, fragile logic that tests for
330 330 # reversing the contexts and wouldn't need to exist if it weren't for
331 331 # the fast (and common) code path of comparing the working directory
332 332 # with its first parent.
333 333 #
334 334 # What we're aiming for here is the ability to call:
335 335 #
336 336 # workingctx.status(parentctx)
337 337 #
338 338 # If we always built the manifest for each context and compared those,
339 339 # then we'd be done. But the special case of the above call means we
340 340 # just copy the manifest of the parent.
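# (illustrative) e.g. workingctx.status(parentctx) is computed below as
# parentctx.status(workingctx), and the added/removed lists are swapped
# back once the status has been built.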
341 341 reversed = False
342 342 if (not isinstance(ctx1, changectx)
343 343 and isinstance(ctx2, changectx)):
344 344 reversed = True
345 345 ctx1, ctx2 = ctx2, ctx1
346 346
347 347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 348 match = ctx2._matchstatus(ctx1, match)
349 349 r = scmutil.status([], [], [], [], [], [], [])
350 350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 351 listunknown)
352 352
353 353 if reversed:
354 354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 355 # these make no sense to reverse.
356 356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 357 r.clean)
358 358
359 359 if listsubrepos:
360 360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 361 try:
362 362 rev2 = ctx2.subrev(subpath)
363 363 except KeyError:
364 364 # A subrepo that existed in node1 was deleted between
365 365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 366 # won't contain that subpath. The best we can do is ignore it.
367 367 rev2 = None
368 368 submatch = matchmod.subdirmatcher(subpath, match)
369 369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 370 clean=listclean, unknown=listunknown,
371 371 listsubrepos=True)
372 372 for rfiles, sfiles in zip(r, s):
373 373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 374
375 375 for l in r:
376 376 l.sort()
377 377
378 378 return r
379 379
380 380 def changectxdeprecwarn(repo):
381 381 # changectx's constructor will soon lose support for these forms of
382 382 # changeids:
383 383 # * stringified ints
384 384 # * bookmarks, tags, branches, and other namespace identifiers
385 385 # * hex nodeid prefixes
386 386 #
387 387 # Depending on your use case, replace repo[x] by one of these:
388 388 # * If you want to support general revsets, use scmutil.revsingle(x)
389 389 # * If you know that "x" is a stringified int, use repo[int(x)]
390 390 # * If you know that "x" is a bookmark, use repo._bookmarks.changectx(x)
391 391 # * If you know that "x" is a tag, use repo[repo.tags()[x]]
392 392 # * If you know that "x" is a branch or in some other namespace,
393 393 # use the appropriate mechanism for that namespace
394 394 # * If you know that "x" is a hex nodeid prefix, use
395 395 # repo[scmutil.resolvehexnodeidprefix(repo, x)]
396 396 # * If "x" is a string that can be any of the above, but you don't want
397 397 # to allow general revsets (perhaps because "x" may come from a remote
398 398 # user and the revset may be too costly), use scmutil.revsymbol(repo, x)
399 399 # * If "x" can be a mix of the above, you'll have to figure it out
400 400 # yourself
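# Illustrative replacements (a sketch derived only from the list above;
# the right choice depends on the caller):
# ctx = scmutil.revsymbol(repo, x) # symbol that may come from a user
# ctx = repo[int(x)] # x known to be a stringified int
# ctx = repo[scmutil.resolvehexnodeidprefix(repo, x)] # hex nodeid prefix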
401 repo.ui.deprecwarn("changectx.__init__ is getting more limited, see source "
402 "for details", "4.6", stacklevel=4)
401 repo.ui.deprecwarn("changectx.__init__ is getting more limited, see "
402 "context.changectxdeprecwarn() for details", "4.6",
403 stacklevel=4)
403 404
404 405 class changectx(basectx):
405 406 """A changecontext object makes access to data related to a particular
406 407 changeset convenient. It represents a read-only context already present in
407 408 the repo."""
408 409 def __init__(self, repo, changeid='.'):
409 410 """changeid is a revision number, node, or tag"""
410 411 super(changectx, self).__init__(repo)
411 412
412 413 try:
413 414 if isinstance(changeid, int):
414 415 self._node = repo.changelog.node(changeid)
415 416 self._rev = changeid
416 417 return
417 418 if changeid == 'null':
418 419 self._node = nullid
419 420 self._rev = nullrev
420 421 return
421 422 if changeid == 'tip':
422 423 self._node = repo.changelog.tip()
423 424 self._rev = repo.changelog.rev(self._node)
424 425 return
425 426 if (changeid == '.'
426 427 or repo.local() and changeid == repo.dirstate.p1()):
427 428 # this is a hack to delay/avoid loading obsmarkers
428 429 # when we know that '.' won't be hidden
429 430 self._node = repo.dirstate.p1()
430 431 self._rev = repo.unfiltered().changelog.rev(self._node)
431 432 return
432 433 if len(changeid) == 20:
433 434 try:
434 435 self._node = changeid
435 436 self._rev = repo.changelog.rev(changeid)
436 437 return
437 438 except error.FilteredLookupError:
438 439 raise
439 440 except LookupError:
440 441 pass
441 442
442 443 try:
443 444 r = int(changeid)
444 445 if '%d' % r != changeid:
445 446 raise ValueError
446 447 l = len(repo.changelog)
447 448 if r < 0:
448 449 r += l
449 450 if r < 0 or r >= l and r != wdirrev:
450 451 raise ValueError
451 452 self._rev = r
452 453 self._node = repo.changelog.node(r)
453 454 changectxdeprecwarn(repo)
454 455 return
455 456 except error.FilteredIndexError:
456 457 raise
457 458 except (ValueError, OverflowError, IndexError):
458 459 pass
459 460
460 461 if len(changeid) == 40:
461 462 try:
462 463 self._node = bin(changeid)
463 464 self._rev = repo.changelog.rev(self._node)
464 465 return
465 466 except error.FilteredLookupError:
466 467 raise
467 468 except (TypeError, LookupError):
468 469 pass
469 470
470 471 # lookup bookmarks through the name interface
471 472 try:
472 473 self._node = repo.names.singlenode(repo, changeid)
473 474 self._rev = repo.changelog.rev(self._node)
474 475 changectxdeprecwarn(repo)
475 476 return
476 477 except KeyError:
477 478 pass
478 479
479 480 self._node = scmutil.resolvehexnodeidprefix(repo, changeid)
480 481 if self._node is not None:
481 482 self._rev = repo.changelog.rev(self._node)
482 483 changectxdeprecwarn(repo)
483 484 return
484 485
485 486 # lookup failed
486 487 # check if it might have come from damaged dirstate
487 488 #
488 489 # XXX we could avoid the unfiltered if we had a recognizable
489 490 # exception for filtered changeset access
490 491 if (repo.local()
491 492 and changeid in repo.unfiltered().dirstate.parents()):
492 493 msg = _("working directory has unknown parent '%s'!")
493 494 raise error.Abort(msg % short(changeid))
494 495 try:
495 496 if len(changeid) == 20 and nonascii(changeid):
496 497 changeid = hex(changeid)
497 498 except TypeError:
498 499 pass
499 500 except (error.FilteredIndexError, error.FilteredLookupError,
500 501 error.FilteredRepoLookupError):
501 502 raise
502 503 except IndexError:
503 504 pass
504 505 raise error.RepoLookupError(
505 506 _("unknown revision '%s'") % changeid)
506 507
507 508 def __hash__(self):
508 509 try:
509 510 return hash(self._rev)
510 511 except AttributeError:
511 512 return id(self)
512 513
513 514 def __nonzero__(self):
514 515 return self._rev != nullrev
515 516
516 517 __bool__ = __nonzero__
517 518
518 519 @propertycache
519 520 def _changeset(self):
520 521 return self._repo.changelog.changelogrevision(self.rev())
521 522
522 523 @propertycache
523 524 def _manifest(self):
524 525 return self._manifestctx.read()
525 526
526 527 @property
527 528 def _manifestctx(self):
528 529 return self._repo.manifestlog[self._changeset.manifest]
529 530
530 531 @propertycache
531 532 def _manifestdelta(self):
532 533 return self._manifestctx.readdelta()
533 534
534 535 @propertycache
535 536 def _parents(self):
536 537 repo = self._repo
537 538 p1, p2 = repo.changelog.parentrevs(self._rev)
538 539 if p2 == nullrev:
539 540 return [changectx(repo, p1)]
540 541 return [changectx(repo, p1), changectx(repo, p2)]
541 542
542 543 def changeset(self):
543 544 c = self._changeset
544 545 return (
545 546 c.manifest,
546 547 c.user,
547 548 c.date,
548 549 c.files,
549 550 c.description,
550 551 c.extra,
551 552 )
552 553 def manifestnode(self):
553 554 return self._changeset.manifest
554 555
555 556 def user(self):
556 557 return self._changeset.user
557 558 def date(self):
558 559 return self._changeset.date
559 560 def files(self):
560 561 return self._changeset.files
561 562 def description(self):
562 563 return self._changeset.description
563 564 def branch(self):
564 565 return encoding.tolocal(self._changeset.extra.get("branch"))
565 566 def closesbranch(self):
566 567 return 'close' in self._changeset.extra
567 568 def extra(self):
568 569 """Return a dict of extra information."""
569 570 return self._changeset.extra
570 571 def tags(self):
571 572 """Return a list of byte tag names"""
572 573 return self._repo.nodetags(self._node)
573 574 def bookmarks(self):
574 575 """Return a list of byte bookmark names."""
575 576 return self._repo.nodebookmarks(self._node)
576 577 def phase(self):
577 578 return self._repo._phasecache.phase(self._repo, self._rev)
578 579 def hidden(self):
579 580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 581
581 582 def isinmemory(self):
582 583 return False
583 584
584 585 def children(self):
585 586 """return list of changectx contexts for each child changeset.
586 587
587 588 This returns only the immediate child changesets. Use descendants() to
588 589 recursively walk children.
589 590 """
590 591 c = self._repo.changelog.children(self._node)
591 592 return [changectx(self._repo, x) for x in c]
592 593
593 594 def ancestors(self):
594 595 for a in self._repo.changelog.ancestors([self._rev]):
595 596 yield changectx(self._repo, a)
596 597
597 598 def descendants(self):
598 599 """Recursively yield all children of the changeset.
599 600
600 601 For just the immediate children, use children()
601 602 """
602 603 for d in self._repo.changelog.descendants([self._rev]):
603 604 yield changectx(self._repo, d)
604 605
605 606 def filectx(self, path, fileid=None, filelog=None):
606 607 """get a file context from this changeset"""
607 608 if fileid is None:
608 609 fileid = self.filenode(path)
609 610 return filectx(self._repo, path, fileid=fileid,
610 611 changectx=self, filelog=filelog)
611 612
612 613 def ancestor(self, c2, warn=False):
613 614 """return the "best" ancestor context of self and c2
614 615
615 616 If there are multiple candidates, it will show a message and check
616 617 merge.preferancestor configuration before falling back to the
617 618 revlog ancestor."""
618 619 # deal with workingctxs
619 620 n2 = c2._node
620 621 if n2 is None:
621 622 n2 = c2._parents[0]._node
622 623 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
623 624 if not cahs:
624 625 anc = nullid
625 626 elif len(cahs) == 1:
626 627 anc = cahs[0]
627 628 else:
628 629 # experimental config: merge.preferancestor
629 630 for r in self._repo.ui.configlist('merge', 'preferancestor'):
630 631 try:
631 632 ctx = scmutil.revsymbol(self._repo, r)
632 633 except error.RepoLookupError:
633 634 continue
634 635 anc = ctx.node()
635 636 if anc in cahs:
636 637 break
637 638 else:
638 639 anc = self._repo.changelog.ancestor(self._node, n2)
639 640 if warn:
640 641 self._repo.ui.status(
641 642 (_("note: using %s as ancestor of %s and %s\n") %
642 643 (short(anc), short(self._node), short(n2))) +
643 644 ''.join(_(" alternatively, use --config "
644 645 "merge.preferancestor=%s\n") %
645 646 short(n) for n in sorted(cahs) if n != anc))
646 647 return changectx(self._repo, anc)
647 648
648 649 def descendant(self, other):
649 650 """True if other is descendant of this changeset"""
650 651 return self._repo.changelog.descendant(self._rev, other._rev)
651 652
652 653 def walk(self, match):
653 654 '''Generates matching file names.'''
654 655
655 656 # Wrap match.bad method to have message with nodeid
656 657 def bad(fn, msg):
657 658 # The manifest doesn't know about subrepos, so don't complain about
658 659 # paths into valid subrepos.
659 660 if any(fn == s or fn.startswith(s + '/')
660 661 for s in self.substate):
661 662 return
662 663 match.bad(fn, _('no such file in rev %s') % self)
663 664
664 665 m = matchmod.badmatch(match, bad)
665 666 return self._manifest.walk(m)
666 667
667 668 def matches(self, match):
668 669 return self.walk(match)
669 670
670 671 class basefilectx(object):
671 672 """A filecontext object represents the common logic for its children:
672 673 filectx: read-only access to a filerevision that is already present
673 674 in the repo,
674 675 workingfilectx: a filecontext that represents files from the working
675 676 directory,
676 677 memfilectx: a filecontext that represents files in-memory,
677 678 overlayfilectx: duplicate another filecontext with some fields overridden.
678 679 """
679 680 @propertycache
680 681 def _filelog(self):
681 682 return self._repo.file(self._path)
682 683
683 684 @propertycache
684 685 def _changeid(self):
685 686 if r'_changeid' in self.__dict__:
686 687 return self._changeid
687 688 elif r'_changectx' in self.__dict__:
688 689 return self._changectx.rev()
689 690 elif r'_descendantrev' in self.__dict__:
690 691 # this file context was created from a revision with a known
691 692 # descendant, we can (lazily) correct for linkrev aliases
692 693 return self._adjustlinkrev(self._descendantrev)
693 694 else:
694 695 return self._filelog.linkrev(self._filerev)
695 696
696 697 @propertycache
697 698 def _filenode(self):
698 699 if r'_fileid' in self.__dict__:
699 700 return self._filelog.lookup(self._fileid)
700 701 else:
701 702 return self._changectx.filenode(self._path)
702 703
703 704 @propertycache
704 705 def _filerev(self):
705 706 return self._filelog.rev(self._filenode)
706 707
707 708 @propertycache
708 709 def _repopath(self):
709 710 return self._path
710 711
711 712 def __nonzero__(self):
712 713 try:
713 714 self._filenode
714 715 return True
715 716 except error.LookupError:
716 717 # file is missing
717 718 return False
718 719
719 720 __bool__ = __nonzero__
720 721
721 722 def __bytes__(self):
722 723 try:
723 724 return "%s@%s" % (self.path(), self._changectx)
724 725 except error.LookupError:
725 726 return "%s@???" % self.path()
726 727
727 728 __str__ = encoding.strmethod(__bytes__)
728 729
729 730 def __repr__(self):
730 731 return r"<%s %s>" % (type(self).__name__, str(self))
731 732
732 733 def __hash__(self):
733 734 try:
734 735 return hash((self._path, self._filenode))
735 736 except AttributeError:
736 737 return id(self)
737 738
738 739 def __eq__(self, other):
739 740 try:
740 741 return (type(self) == type(other) and self._path == other._path
741 742 and self._filenode == other._filenode)
742 743 except AttributeError:
743 744 return False
744 745
745 746 def __ne__(self, other):
746 747 return not (self == other)
747 748
748 749 def filerev(self):
749 750 return self._filerev
750 751 def filenode(self):
751 752 return self._filenode
752 753 @propertycache
753 754 def _flags(self):
754 755 return self._changectx.flags(self._path)
755 756 def flags(self):
756 757 return self._flags
757 758 def filelog(self):
758 759 return self._filelog
759 760 def rev(self):
760 761 return self._changeid
761 762 def linkrev(self):
762 763 return self._filelog.linkrev(self._filerev)
763 764 def node(self):
764 765 return self._changectx.node()
765 766 def hex(self):
766 767 return self._changectx.hex()
767 768 def user(self):
768 769 return self._changectx.user()
769 770 def date(self):
770 771 return self._changectx.date()
771 772 def files(self):
772 773 return self._changectx.files()
773 774 def description(self):
774 775 return self._changectx.description()
775 776 def branch(self):
776 777 return self._changectx.branch()
777 778 def extra(self):
778 779 return self._changectx.extra()
779 780 def phase(self):
780 781 return self._changectx.phase()
781 782 def phasestr(self):
782 783 return self._changectx.phasestr()
783 784 def obsolete(self):
784 785 return self._changectx.obsolete()
785 786 def instabilities(self):
786 787 return self._changectx.instabilities()
787 788 def manifest(self):
788 789 return self._changectx.manifest()
789 790 def changectx(self):
790 791 return self._changectx
791 792 def renamed(self):
792 793 return self._copied
793 794 def repo(self):
794 795 return self._repo
795 796 def size(self):
796 797 return len(self.data())
797 798
798 799 def path(self):
799 800 return self._path
800 801
801 802 def isbinary(self):
802 803 try:
803 804 return stringutil.binary(self.data())
804 805 except IOError:
805 806 return False
806 807 def isexec(self):
807 808 return 'x' in self.flags()
808 809 def islink(self):
809 810 return 'l' in self.flags()
810 811
811 812 def isabsent(self):
812 813 """whether this filectx represents a file not in self._changectx
813 814
814 815 This is mainly for merge code to detect change/delete conflicts. This is
815 816 expected to be True for all subclasses of basectx."""
816 817 return False
817 818
818 819 _customcmp = False
819 820 def cmp(self, fctx):
820 821 """compare with other file context
821 822
822 823 returns True if different than fctx.
823 824 """
824 825 if fctx._customcmp:
825 826 return fctx.cmp(self)
826 827
827 828 if (fctx._filenode is None
828 829 and (self._repo._encodefilterpats
829 830 # if file data starts with '\1\n', empty metadata block is
830 831 # prepended, which adds 4 bytes to filelog.size().
831 832 or self.size() - 4 == fctx.size())
832 833 or self.size() == fctx.size()):
833 834 return self._filelog.cmp(self._filenode, fctx.data())
834 835
835 836 return True
836 837
837 838 def _adjustlinkrev(self, srcrev, inclusive=False):
838 839 """return the first ancestor of <srcrev> introducing <fnode>
839 840
840 841 If the linkrev of the file revision does not point to an ancestor of
841 842 srcrev, we'll walk down the ancestors until we find one introducing
842 843 this file revision.
843 844
844 845 :srcrev: the changeset revision we search ancestors from
845 846 :inclusive: if true, the src revision will also be checked
846 847 """
847 848 repo = self._repo
848 849 cl = repo.unfiltered().changelog
849 850 mfl = repo.manifestlog
850 851 # fetch the linkrev
851 852 lkr = self.linkrev()
852 853 # hack to reuse ancestor computation when searching for renames
853 854 memberanc = getattr(self, '_ancestrycontext', None)
854 855 iteranc = None
855 856 if srcrev is None:
856 857 # wctx case, used by workingfilectx during mergecopy
857 858 revs = [p.rev() for p in self._repo[None].parents()]
858 859 inclusive = True # we skipped the real (revless) source
859 860 else:
860 861 revs = [srcrev]
861 862 if memberanc is None:
862 863 memberanc = iteranc = cl.ancestors(revs, lkr,
863 864 inclusive=inclusive)
864 865 # check if this linkrev is an ancestor of srcrev
865 866 if lkr not in memberanc:
866 867 if iteranc is None:
867 868 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
868 869 fnode = self._filenode
869 870 path = self._path
870 871 for a in iteranc:
871 872 ac = cl.read(a) # get changeset data (we avoid object creation)
872 873 if path in ac[3]: # checking the 'files' field.
873 874 # The file has been touched, check if the content is
874 875 # similar to the one we search for.
875 876 if fnode == mfl[ac[0]].readfast().get(path):
876 877 return a
877 878 # In theory, we should never get out of that loop without a result.
878 879 # But if the manifest uses a buggy file revision (not a child of the
879 880 # one it replaces) we could. Such a buggy situation will likely
880 881 # result in a crash somewhere else at some point.
881 882 return lkr
882 883
883 884 def introrev(self):
884 885 """return the rev of the changeset which introduced this file revision
885 886
886 887 This method is different from linkrev because it takes into account the
887 888 changeset the filectx was created from. It ensures the returned
888 889 revision is one of its ancestors. This prevents bugs from
889 890 'linkrev-shadowing' when a file revision is used by multiple
890 891 changesets.
891 892 """
892 893 lkr = self.linkrev()
893 894 attrs = vars(self)
894 895 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
895 896 if noctx or self.rev() == lkr:
896 897 return self.linkrev()
897 898 return self._adjustlinkrev(self.rev(), inclusive=True)
898 899
899 900 def introfilectx(self):
900 901 """Return filectx having identical contents, but pointing to the
901 902 changeset revision where this filectx was introduced"""
902 903 introrev = self.introrev()
903 904 if self.rev() == introrev:
904 905 return self
905 906 return self.filectx(self.filenode(), changeid=introrev)
906 907
907 908 def _parentfilectx(self, path, fileid, filelog):
908 909 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
909 910 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
910 911 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
911 912 # If self is associated with a changeset (probably explicitly
912 913 # fed), ensure the created filectx is associated with a
913 914 # changeset that is an ancestor of self.changectx.
914 915 # This lets us later use _adjustlinkrev to get a correct link.
915 916 fctx._descendantrev = self.rev()
916 917 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
917 918 elif r'_descendantrev' in vars(self):
918 919 # Otherwise propagate _descendantrev if we have one associated.
919 920 fctx._descendantrev = self._descendantrev
920 921 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
921 922 return fctx
922 923
923 924 def parents(self):
924 925 _path = self._path
925 926 fl = self._filelog
926 927 parents = self._filelog.parents(self._filenode)
927 928 pl = [(_path, node, fl) for node in parents if node != nullid]
928 929
929 930 r = fl.renamed(self._filenode)
930 931 if r:
931 932 # - In the simple rename case, both parents are nullid, pl is empty.
932 933 # - In case of merge, only one of the parents is nullid and should
933 934 # be replaced with the rename information. This parent is -always-
934 935 # the first one.
935 936 #
936 937 # As nullid parents have always been filtered out by the previous list
937 938 # comprehension, inserting at index 0 always amounts to replacing the
938 939 # first nullid parent with the rename information.
939 940 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
940 941
941 942 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
942 943
943 944 def p1(self):
944 945 return self.parents()[0]
945 946
946 947 def p2(self):
947 948 p = self.parents()
948 949 if len(p) == 2:
949 950 return p[1]
950 951 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
951 952
952 953 def annotate(self, follow=False, skiprevs=None, diffopts=None):
953 954 """Returns a list of annotateline objects for each line in the file
954 955
955 956 - line.fctx is the filectx of the node where that line was last changed
956 957 - line.lineno is the line number at the first appearance in the managed
957 958 file
958 959 - line.text is the data on that line (including newline character)
959 960 """
960 961 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
961 962
962 963 def parents(f):
963 964 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
964 965 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
965 966 # from the topmost introrev (= srcrev) down to p.linkrev() if it
966 967 # isn't an ancestor of the srcrev.
967 968 f._changeid
968 969 pl = f.parents()
969 970
970 971 # Don't return renamed parents if we aren't following.
971 972 if not follow:
972 973 pl = [p for p in pl if p.path() == f.path()]
973 974
974 975 # renamed filectx won't have a filelog yet, so set it
975 976 # from the cache to save time
976 977 for p in pl:
977 978 if not r'_filelog' in p.__dict__:
978 979 p._filelog = getlog(p.path())
979 980
980 981 return pl
981 982
982 983 # use linkrev to find the first changeset where self appeared
983 984 base = self.introfilectx()
984 985 if getattr(base, '_ancestrycontext', None) is None:
985 986 cl = self._repo.changelog
986 987 if base.rev() is None:
987 988 # wctx is not inclusive, but works because _ancestrycontext
988 989 # is used to test filelog revisions
989 990 ac = cl.ancestors([p.rev() for p in base.parents()],
990 991 inclusive=True)
991 992 else:
992 993 ac = cl.ancestors([base.rev()], inclusive=True)
993 994 base._ancestrycontext = ac
994 995
995 996 return dagop.annotate(base, parents, skiprevs=skiprevs,
996 997 diffopts=diffopts)
997 998
998 999 def ancestors(self, followfirst=False):
999 1000 visit = {}
1000 1001 c = self
1001 1002 if followfirst:
1002 1003 cut = 1
1003 1004 else:
1004 1005 cut = None
1005 1006
1006 1007 while True:
1007 1008 for parent in c.parents()[:cut]:
1008 1009 visit[(parent.linkrev(), parent.filenode())] = parent
1009 1010 if not visit:
1010 1011 break
1011 1012 c = visit.pop(max(visit))
1012 1013 yield c
1013 1014
1014 1015 def decodeddata(self):
1015 1016 """Returns `data()` after running repository decoding filters.
1016 1017
1017 1018 This is often equivalent to how the data would be expressed on disk.
1018 1019 """
1019 1020 return self._repo.wwritedata(self.path(), self.data())
1020 1021
1021 1022 class filectx(basefilectx):
1022 1023 """A filecontext object makes access to data related to a particular
1023 1024 filerevision convenient."""
1024 1025 def __init__(self, repo, path, changeid=None, fileid=None,
1025 1026 filelog=None, changectx=None):
1026 1027 """changeid can be a changeset revision, node, or tag.
1027 1028 fileid can be a file revision or node."""
1028 1029 self._repo = repo
1029 1030 self._path = path
1030 1031
1031 1032 assert (changeid is not None
1032 1033 or fileid is not None
1033 1034 or changectx is not None), \
1034 1035 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1035 1036 % (changeid, fileid, changectx))
1036 1037
1037 1038 if filelog is not None:
1038 1039 self._filelog = filelog
1039 1040
1040 1041 if changeid is not None:
1041 1042 self._changeid = changeid
1042 1043 if changectx is not None:
1043 1044 self._changectx = changectx
1044 1045 if fileid is not None:
1045 1046 self._fileid = fileid
1046 1047
1047 1048 @propertycache
1048 1049 def _changectx(self):
1049 1050 try:
1050 1051 return changectx(self._repo, self._changeid)
1051 1052 except error.FilteredRepoLookupError:
1052 1053 # Linkrev may point to any revision in the repository. When the
1053 1054 # repository is filtered this may lead to `filectx` trying to build
1055 1056 # `changectx` for a filtered revision. In such cases we fall back to
1056 1057 # creating `changectx` on the unfiltered version of the repository.
1056 1057 # This fallback should not be an issue because `changectx` from
1057 1058 # `filectx` are not used in complex operations that care about
1058 1059 # filtering.
1059 1060 #
1060 1061 # This fallback is a cheap and dirty fix that prevents several
1061 1062 # crashes. It does not ensure the behavior is correct. However the
1062 1063 # behavior was not correct before filtering either, and "incorrect
1063 1064 # behavior" is seen as better than "crash".
1064 1065 #
1065 1066 # Linkrevs have several serious troubles with filtering that are
1066 1067 # complicated to solve. Proper handling of the issue here should be
1067 1068 # considered when solutions to the linkrev issue are on the table.
1068 1069 return changectx(self._repo.unfiltered(), self._changeid)
1069 1070
1070 1071 def filectx(self, fileid, changeid=None):
1071 1072 '''opens an arbitrary revision of the file without
1072 1073 opening a new filelog'''
1073 1074 return filectx(self._repo, self._path, fileid=fileid,
1074 1075 filelog=self._filelog, changeid=changeid)
1075 1076
1076 1077 def rawdata(self):
1077 1078 return self._filelog.revision(self._filenode, raw=True)
1078 1079
1079 1080 def rawflags(self):
1080 1081 """low-level revlog flags"""
1081 1082 return self._filelog.flags(self._filerev)
1082 1083
1083 1084 def data(self):
1084 1085 try:
1085 1086 return self._filelog.read(self._filenode)
1086 1087 except error.CensoredNodeError:
1087 1088 if self._repo.ui.config("censor", "policy") == "ignore":
1088 1089 return ""
1089 1090 raise error.Abort(_("censored node: %s") % short(self._filenode),
1090 1091 hint=_("set censor.policy to ignore errors"))
1091 1092
1092 1093 def size(self):
1093 1094 return self._filelog.size(self._filerev)
1094 1095
1095 1096 @propertycache
1096 1097 def _copied(self):
1097 1098 """check if file was actually renamed in this changeset revision
1098 1099
1099 1100 If a rename is logged in the file revision, we report the copy for the
1100 1101 changeset only if the file revision's linkrev points back to the changeset
1101 1102 in question or if both changeset parents contain different file revisions.
1102 1103 """
1103 1104
1104 1105 renamed = self._filelog.renamed(self._filenode)
1105 1106 if not renamed:
1106 1107 return renamed
1107 1108
1108 1109 if self.rev() == self.linkrev():
1109 1110 return renamed
1110 1111
1111 1112 name = self.path()
1112 1113 fnode = self._filenode
1113 1114 for p in self._changectx.parents():
1114 1115 try:
1115 1116 if fnode == p.filenode(name):
1116 1117 return None
1117 1118 except error.LookupError:
1118 1119 pass
1119 1120 return renamed
1120 1121
1121 1122 def children(self):
1122 1123 # hard for renames
1123 1124 c = self._filelog.children(self._filenode)
1124 1125 return [filectx(self._repo, self._path, fileid=x,
1125 1126 filelog=self._filelog) for x in c]
1126 1127
1127 1128 class committablectx(basectx):
1128 1129 """A committablectx object provides common functionality for a context that
1129 1130 wants the ability to commit, e.g. workingctx or memctx."""
1130 1131 def __init__(self, repo, text="", user=None, date=None, extra=None,
1131 1132 changes=None):
1132 1133 super(committablectx, self).__init__(repo)
1133 1134 self._rev = None
1134 1135 self._node = None
1135 1136 self._text = text
1136 1137 if date:
1137 1138 self._date = dateutil.parsedate(date)
1138 1139 if user:
1139 1140 self._user = user
1140 1141 if changes:
1141 1142 self._status = changes
1142 1143
1143 1144 self._extra = {}
1144 1145 if extra:
1145 1146 self._extra = extra.copy()
1146 1147 if 'branch' not in self._extra:
1147 1148 try:
1148 1149 branch = encoding.fromlocal(self._repo.dirstate.branch())
1149 1150 except UnicodeDecodeError:
1150 1151 raise error.Abort(_('branch name not in UTF-8!'))
1151 1152 self._extra['branch'] = branch
1152 1153 if self._extra['branch'] == '':
1153 1154 self._extra['branch'] = 'default'
1154 1155
1155 1156 def __bytes__(self):
1156 1157 return bytes(self._parents[0]) + "+"
1157 1158
1158 1159 __str__ = encoding.strmethod(__bytes__)
1159 1160
1160 1161 def __nonzero__(self):
1161 1162 return True
1162 1163
1163 1164 __bool__ = __nonzero__
1164 1165
1165 1166 def _buildflagfunc(self):
1166 1167 # Create a fallback function for getting file flags when the
1167 1168 # filesystem doesn't support them
1168 1169
1169 1170 copiesget = self._repo.dirstate.copies().get
1170 1171 parents = self.parents()
1171 1172 if len(parents) < 2:
1172 1173 # when we have one parent, it's easy: copy from parent
1173 1174 man = parents[0].manifest()
1174 1175 def func(f):
1175 1176 f = copiesget(f, f)
1176 1177 return man.flags(f)
1177 1178 else:
1178 1179 # merges are tricky: we try to reconstruct the unstored
1179 1180 # result from the merge (issue1802)
1180 1181 p1, p2 = parents
1181 1182 pa = p1.ancestor(p2)
1182 1183 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1183 1184
1184 1185 def func(f):
1185 1186 f = copiesget(f, f) # may be wrong for merges with copies
1186 1187 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1187 1188 if fl1 == fl2:
1188 1189 return fl1
1189 1190 if fl1 == fla:
1190 1191 return fl2
1191 1192 if fl2 == fla:
1192 1193 return fl1
1193 1194 return '' # punt for conflicts
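# (illustrative) e.g. if only p2 changed the flag relative to the
# ancestor (fl1 == fla), p2's flag wins; if both parents changed it in
# conflicting ways, no flag is reported at all.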
1194 1195
1195 1196 return func
1196 1197
1197 1198 @propertycache
1198 1199 def _flagfunc(self):
1199 1200 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1200 1201
1201 1202 @propertycache
1202 1203 def _status(self):
1203 1204 return self._repo.status()
1204 1205
1205 1206 @propertycache
1206 1207 def _user(self):
1207 1208 return self._repo.ui.username()
1208 1209
1209 1210 @propertycache
1210 1211 def _date(self):
1211 1212 ui = self._repo.ui
1212 1213 date = ui.configdate('devel', 'default-date')
1213 1214 if date is None:
1214 1215 date = dateutil.makedate()
1215 1216 return date
1216 1217
1217 1218 def subrev(self, subpath):
1218 1219 return None
1219 1220
1220 1221 def manifestnode(self):
1221 1222 return None
1222 1223 def user(self):
1223 1224 return self._user or self._repo.ui.username()
1224 1225 def date(self):
1225 1226 return self._date
1226 1227 def description(self):
1227 1228 return self._text
1228 1229 def files(self):
1229 1230 return sorted(self._status.modified + self._status.added +
1230 1231 self._status.removed)
1231 1232
1232 1233 def modified(self):
1233 1234 return self._status.modified
1234 1235 def added(self):
1235 1236 return self._status.added
1236 1237 def removed(self):
1237 1238 return self._status.removed
1238 1239 def deleted(self):
1239 1240 return self._status.deleted
1240 1241 def branch(self):
1241 1242 return encoding.tolocal(self._extra['branch'])
1242 1243 def closesbranch(self):
1243 1244 return 'close' in self._extra
1244 1245 def extra(self):
1245 1246 return self._extra
1246 1247
1247 1248 def isinmemory(self):
1248 1249 return False
1249 1250
1250 1251 def tags(self):
1251 1252 return []
1252 1253
1253 1254 def bookmarks(self):
1254 1255 b = []
1255 1256 for p in self.parents():
1256 1257 b.extend(p.bookmarks())
1257 1258 return b
1258 1259
1259 1260 def phase(self):
1260 1261 phase = phases.draft # default phase to draft
1261 1262 for p in self.parents():
1262 1263 phase = max(phase, p.phase())
1263 1264 return phase
1264 1265
1265 1266 def hidden(self):
1266 1267 return False
1267 1268
1268 1269 def children(self):
1269 1270 return []
1270 1271
1271 1272 def flags(self, path):
1272 1273 if r'_manifest' in self.__dict__:
1273 1274 try:
1274 1275 return self._manifest.flags(path)
1275 1276 except KeyError:
1276 1277 return ''
1277 1278
1278 1279 try:
1279 1280 return self._flagfunc(path)
1280 1281 except OSError:
1281 1282 return ''
1282 1283
1283 1284 def ancestor(self, c2):
1284 1285 """return the "best" ancestor context of self and c2"""
1285 1286 return self._parents[0].ancestor(c2) # punt on two parents for now
1286 1287
1287 1288 def walk(self, match):
1288 1289 '''Generates matching file names.'''
1289 1290 return sorted(self._repo.dirstate.walk(match,
1290 1291 subrepos=sorted(self.substate),
1291 1292 unknown=True, ignored=False))
1292 1293
1293 1294 def matches(self, match):
1294 1295 return sorted(self._repo.dirstate.matches(match))
1295 1296
1296 1297 def ancestors(self):
1297 1298 for p in self._parents:
1298 1299 yield p
1299 1300 for a in self._repo.changelog.ancestors(
1300 1301 [p.rev() for p in self._parents]):
1301 1302 yield changectx(self._repo, a)
1302 1303
1303 1304 def markcommitted(self, node):
1304 1305 """Perform post-commit cleanup necessary after committing this ctx
1305 1306
1306 1307 Specifically, this updates backing stores this working context
1307 1308 wraps to reflect the fact that the changes reflected by this
1308 1309 workingctx have been committed. For example, it marks
1309 1310 modified and added files as normal in the dirstate.
1310 1311
1311 1312 """
1312 1313
1313 1314 with self._repo.dirstate.parentchange():
1314 1315 for f in self.modified() + self.added():
1315 1316 self._repo.dirstate.normal(f)
1316 1317 for f in self.removed():
1317 1318 self._repo.dirstate.drop(f)
1318 1319 self._repo.dirstate.setparents(node)
1319 1320
1320 1321 # write changes out explicitly, because nesting wlock at
1321 1322 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1322 1323 # from immediately doing so for subsequent changing files
1323 1324 self._repo.dirstate.write(self._repo.currenttransaction())
1324 1325
1325 1326 def dirty(self, missing=False, merge=True, branch=True):
1326 1327 return False
1327 1328
1328 1329 class workingctx(committablectx):
1329 1330 """A workingctx object makes access to data related to
1330 1331 the current working directory convenient.
1331 1332 date - any valid date string or (unixtime, offset), or None.
1332 1333 user - username string, or None.
1333 1334 extra - a dictionary of extra values, or None.
1334 1335 changes - a list of file lists as returned by localrepo.status()
1335 1336 or None to use the repository status.
1336 1337 """
1337 1338 def __init__(self, repo, text="", user=None, date=None, extra=None,
1338 1339 changes=None):
1339 1340 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1340 1341
1341 1342 def __iter__(self):
1342 1343 d = self._repo.dirstate
1343 1344 for f in d:
1344 1345 if d[f] != 'r':
1345 1346 yield f
1346 1347
1347 1348 def __contains__(self, key):
1348 1349 return self._repo.dirstate[key] not in "?r"
1349 1350
1350 1351 def hex(self):
1351 1352 return hex(wdirid)
1352 1353
1353 1354 @propertycache
1354 1355 def _parents(self):
1355 1356 p = self._repo.dirstate.parents()
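# drop the second parent when it is nullid, i.e. when no merge is in
# progress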
1356 1357 if p[1] == nullid:
1357 1358 p = p[:-1]
1358 1359 return [changectx(self._repo, x) for x in p]
1359 1360
1360 1361 def _fileinfo(self, path):
1361 1362 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1362 1363 self._manifest
1363 1364 return super(workingctx, self)._fileinfo(path)
1364 1365
1365 1366 def filectx(self, path, filelog=None):
1366 1367 """get a file context from the working directory"""
1367 1368 return workingfilectx(self._repo, path, workingctx=self,
1368 1369 filelog=filelog)
1369 1370
1370 1371 def dirty(self, missing=False, merge=True, branch=True):
1371 1372 "check whether a working directory is modified"
1372 1373 # check subrepos first
1373 1374 for s in sorted(self.substate):
1374 1375 if self.sub(s).dirty(missing=missing):
1375 1376 return True
1376 1377 # check current working dir
1377 1378 return ((merge and self.p2()) or
1378 1379 (branch and self.branch() != self.p1().branch()) or
1379 1380 self.modified() or self.added() or self.removed() or
1380 1381 (missing and self.deleted()))
1381 1382
1382 1383 def add(self, list, prefix=""):
1383 1384 with self._repo.wlock():
1384 1385 ui, ds = self._repo.ui, self._repo.dirstate
1385 1386 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1386 1387 rejected = []
1387 1388 lstat = self._repo.wvfs.lstat
1388 1389 for f in list:
1389 1390 # ds.pathto() returns an absolute path when this is invoked from
1390 1391 # the keyword extension. That gets flagged as non-portable on
1391 1392 # Windows, since it contains the drive letter and colon.
1392 1393 scmutil.checkportable(ui, os.path.join(prefix, f))
1393 1394 try:
1394 1395 st = lstat(f)
1395 1396 except OSError:
1396 1397 ui.warn(_("%s does not exist!\n") % uipath(f))
1397 1398 rejected.append(f)
1398 1399 continue
1399 1400 if st.st_size > 10000000:
1400 1401 ui.warn(_("%s: up to %d MB of RAM may be required "
1401 1402 "to manage this file\n"
1402 1403 "(use 'hg revert %s' to cancel the "
1403 1404 "pending addition)\n")
1404 1405 % (f, 3 * st.st_size // 1000000, uipath(f)))
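# (illustrative) the warning above estimates that roughly three times
# the file size may be needed in memory to manage such a large file.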
1405 1406 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1406 1407 ui.warn(_("%s not added: only files and symlinks "
1407 1408 "supported currently\n") % uipath(f))
1408 1409 rejected.append(f)
1409 1410 elif ds[f] in 'amn':
1410 1411 ui.warn(_("%s already tracked!\n") % uipath(f))
1411 1412 elif ds[f] == 'r':
1412 1413 ds.normallookup(f)
1413 1414 else:
1414 1415 ds.add(f)
1415 1416 return rejected
1416 1417
1417 1418 def forget(self, files, prefix=""):
1418 1419 with self._repo.wlock():
1419 1420 ds = self._repo.dirstate
1420 1421 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1421 1422 rejected = []
1422 1423 for f in files:
1423 1424 if f not in self._repo.dirstate:
1424 1425 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1425 1426 rejected.append(f)
1426 1427 elif self._repo.dirstate[f] != 'a':
1427 1428 self._repo.dirstate.remove(f)
1428 1429 else:
1429 1430 self._repo.dirstate.drop(f)
1430 1431 return rejected
1431 1432
1432 1433 def undelete(self, list):
1433 1434 pctxs = self.parents()
1434 1435 with self._repo.wlock():
1435 1436 ds = self._repo.dirstate
1436 1437 for f in list:
1437 1438 if self._repo.dirstate[f] != 'r':
1438 1439 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1439 1440 else:
1440 1441 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1441 1442 t = fctx.data()
1442 1443 self._repo.wwrite(f, t, fctx.flags())
1443 1444 self._repo.dirstate.normal(f)
1444 1445
1445 1446 def copy(self, source, dest):
1446 1447 try:
1447 1448 st = self._repo.wvfs.lstat(dest)
1448 1449 except OSError as err:
1449 1450 if err.errno != errno.ENOENT:
1450 1451 raise
1451 1452 self._repo.ui.warn(_("%s does not exist!\n")
1452 1453 % self._repo.dirstate.pathto(dest))
1453 1454 return
1454 1455 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1455 1456 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1456 1457 "symbolic link\n")
1457 1458 % self._repo.dirstate.pathto(dest))
1458 1459 else:
1459 1460 with self._repo.wlock():
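# dirstate states used below: '?' means untracked and 'r' means marked
# for removal; in both cases the destination must be tracked again
# before the copy source is recorded.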
1460 1461 if self._repo.dirstate[dest] in '?':
1461 1462 self._repo.dirstate.add(dest)
1462 1463 elif self._repo.dirstate[dest] in 'r':
1463 1464 self._repo.dirstate.normallookup(dest)
1464 1465 self._repo.dirstate.copy(source, dest)
1465 1466
1466 1467 def match(self, pats=None, include=None, exclude=None, default='glob',
1467 1468 listsubrepos=False, badfn=None):
1468 1469 r = self._repo
1469 1470
1470 1471 # Only a case insensitive filesystem needs magic to translate user input
1471 1472 # to actual case in the filesystem.
1472 1473 icasefs = not util.fscasesensitive(r.root)
1473 1474 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1474 1475 default, auditor=r.auditor, ctx=self,
1475 1476 listsubrepos=listsubrepos, badfn=badfn,
1476 1477 icasefs=icasefs)
1477 1478
1478 1479 def _filtersuspectsymlink(self, files):
1479 1480 if not files or self._repo.dirstate._checklink:
1480 1481 return files
1481 1482
1482 1483 # Symlink placeholders may get non-symlink-like contents
1483 1484 # via user error or dereferencing by NFS or Samba servers,
1484 1485 # so we filter out any placeholders that don't look like a
1485 1486 # symlink
1486 1487 sane = []
1487 1488 for f in files:
1488 1489 if self.flags(f) == 'l':
1489 1490 d = self[f].data()
1490 1491 if (d == '' or len(d) >= 1024 or '\n' in d
1491 1492 or stringutil.binary(d)):
1492 1493 self._repo.ui.debug('ignoring suspect symlink placeholder'
1493 1494 ' "%s"\n' % f)
1494 1495 continue
1495 1496 sane.append(f)
1496 1497 return sane
1497 1498
1498 1499 def _checklookup(self, files):
1499 1500 # check for any possibly clean files
1500 1501 if not files:
1501 1502 return [], [], []
1502 1503
1503 1504 modified = []
1504 1505 deleted = []
1505 1506 fixup = []
1506 1507 pctx = self._parents[0]
1507 1508 # do a full compare of any files that might have changed
1508 1509 for f in sorted(files):
1509 1510 try:
1510 1511 # This will return True for a file that got replaced by a
1511 1512 # directory in the interim, but fixing that is pretty hard.
1512 1513 if (f not in pctx or self.flags(f) != pctx.flags(f)
1513 1514 or pctx[f].cmp(self[f])):
1514 1515 modified.append(f)
1515 1516 else:
1516 1517 fixup.append(f)
1517 1518 except (IOError, OSError):
1518 1519 # A file became inaccessible in between? Mark it as deleted,
1519 1520 # matching dirstate behavior (issue5584).
1520 1521 # The dirstate has more complex behavior around whether a
1521 1522 # missing file matches a directory, etc, but we don't need to
1522 1523 # bother with that: if f has made it to this point, we're sure
1523 1524 # it's in the dirstate.
1524 1525 deleted.append(f)
1525 1526
1526 1527 return modified, deleted, fixup
1527 1528
1528 1529 def _poststatusfixup(self, status, fixup):
1529 1530 """update dirstate for files that are actually clean"""
1530 1531 poststatus = self._repo.postdsstatus()
1531 1532 if fixup or poststatus:
1532 1533 try:
1533 1534 oldid = self._repo.dirstate.identity()
1534 1535
1535 1536 # updating the dirstate is optional
1536 1537 # so we don't wait on the lock
1537 1538 # wlock can invalidate the dirstate, so cache normal _after_
1538 1539 # taking the lock
1539 1540 with self._repo.wlock(False):
1540 1541 if self._repo.dirstate.identity() == oldid:
1541 1542 if fixup:
1542 1543 normal = self._repo.dirstate.normal
1543 1544 for f in fixup:
1544 1545 normal(f)
1545 1546 # write changes out explicitly, because nesting
1546 1547 # wlock at runtime may prevent 'wlock.release()'
1547 1548 # after this block from doing so for subsequent
1548 1549 # changing files
1549 1550 tr = self._repo.currenttransaction()
1550 1551 self._repo.dirstate.write(tr)
1551 1552
1552 1553 if poststatus:
1553 1554 for ps in poststatus:
1554 1555 ps(self, status)
1555 1556 else:
1556 1557 # in this case, writing changes out breaks
1557 1558 # consistency, because .hg/dirstate was
1558 1559 # already changed simultaneously after last
1559 1560 # caching (see also issue5584 for detail)
1560 1561 self._repo.ui.debug('skip updating dirstate: '
1561 1562 'identity mismatch\n')
1562 1563 except error.LockError:
1563 1564 pass
1564 1565 finally:
1565 1566 # Even if the wlock couldn't be grabbed, clear out the list.
1566 1567 self._repo.clearpostdsstatus()
1567 1568
1568 1569 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1569 1570 '''Gets the status from the dirstate -- internal use only.'''
1570 1571 subrepos = []
1571 1572 if '.hgsub' in self:
1572 1573 subrepos = sorted(self.substate)
1573 1574 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1574 1575 clean=clean, unknown=unknown)
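# 'cmp' lists files whose state cannot be decided from stat data alone
# and therefore need a content comparison; see _checklookup() above.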
1575 1576
1576 1577 # check for any possibly clean files
1577 1578 fixup = []
1578 1579 if cmp:
1579 1580 modified2, deleted2, fixup = self._checklookup(cmp)
1580 1581 s.modified.extend(modified2)
1581 1582 s.deleted.extend(deleted2)
1582 1583
1583 1584 if fixup and clean:
1584 1585 s.clean.extend(fixup)
1585 1586
1586 1587 self._poststatusfixup(s, fixup)
1587 1588
1588 1589 if match.always():
1589 1590 # cache for performance
1590 1591 if s.unknown or s.ignored or s.clean:
1591 1592 # "_status" is cached with list*=False in the normal route
1592 1593 self._status = scmutil.status(s.modified, s.added, s.removed,
1593 1594 s.deleted, [], [], [])
1594 1595 else:
1595 1596 self._status = s
1596 1597
1597 1598 return s
1598 1599
1599 1600 @propertycache
1600 1601 def _manifest(self):
1601 1602 """generate a manifest corresponding to the values in self._status
1602 1603
1603 1604         This reuses the file nodeid from the parent, but uses special node
1604 1605         identifiers for added and modified files. This is used by the manifest
1605 1606         merge to see that files are different and by update logic to avoid
1606 1607 deleting newly added files.
1607 1608 """
1608 1609 return self._buildstatusmanifest(self._status)
1609 1610
1610 1611 def _buildstatusmanifest(self, status):
1611 1612 """Builds a manifest that includes the given status results."""
1612 1613 parents = self.parents()
1613 1614
1614 1615 man = parents[0].manifest().copy()
1615 1616
1616 1617 ff = self._flagfunc
1617 1618 for i, l in ((addednodeid, status.added),
1618 1619 (modifiednodeid, status.modified)):
1619 1620 for f in l:
1620 1621 man[f] = i
1621 1622 try:
1622 1623 man.setflag(f, ff(f))
1623 1624 except OSError:
1624 1625 pass
1625 1626
1626 1627 for f in status.deleted + status.removed:
1627 1628 if f in man:
1628 1629 del man[f]
1629 1630
1630 1631 return man
1631 1632
1632 1633 def _buildstatus(self, other, s, match, listignored, listclean,
1633 1634 listunknown):
1634 1635 """build a status with respect to another context
1635 1636
1636 1637         This includes logic for maintaining the fast path of status when
1637 1638         comparing the working directory against its parent (repo['.']): in
1638 1639         that case the dirstate status can be returned directly, and a new
1639 1640         manifest only needs to be built when comparing against another rev.
1640 1641 """
1641 1642 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1642 1643 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1643 1644 # might have accidentally ended up with the entire contents of the file
1644 1645 # they are supposed to be linking to.
1645 1646 s.modified[:] = self._filtersuspectsymlink(s.modified)
1646 1647 if other != self._repo['.']:
1647 1648 s = super(workingctx, self)._buildstatus(other, s, match,
1648 1649 listignored, listclean,
1649 1650 listunknown)
1650 1651 return s
1651 1652
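    # Illustrative sketch (comment only, not part of this module): the fast
    # path above is what backs a plain `repo[None].status()`, i.e. comparing
    # the working directory against repo['.']; a comparison against any
    # other revision, e.g. `repo[None].status(repo[somerev])` where
    # `somerev` is a hypothetical revision, falls through to the generic
    # manifest-based status in the parent class.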
1652 1653 def _matchstatus(self, other, match):
1653 1654 """override the match method with a filter for directory patterns
1654 1655
1655 1656 We use inheritance to customize the match.bad method only in cases of
1656 1657 workingctx since it belongs only to the working directory when
1657 1658 comparing against the parent changeset.
1658 1659
1659 1660 If we aren't comparing against the working directory's parent, then we
1660 1661 just use the default match object sent to us.
1661 1662 """
1662 1663 if other != self._repo['.']:
1663 1664 def bad(f, msg):
1664 1665 # 'f' may be a directory pattern from 'match.files()',
1665 1666 # so 'f not in ctx1' is not enough
1666 1667 if f not in other and not other.hasdir(f):
1667 1668 self._repo.ui.warn('%s: %s\n' %
1668 1669 (self._repo.dirstate.pathto(f), msg))
1669 1670 match.bad = bad
1670 1671 return match
1671 1672
1672 1673 def markcommitted(self, node):
1673 1674 super(workingctx, self).markcommitted(node)
1674 1675
1675 1676 sparse.aftercommit(self._repo, node)
1676 1677
1677 1678 class committablefilectx(basefilectx):
1678 1679 """A committablefilectx provides common functionality for a file context
1679 1680 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1680 1681 def __init__(self, repo, path, filelog=None, ctx=None):
1681 1682 self._repo = repo
1682 1683 self._path = path
1683 1684 self._changeid = None
1684 1685 self._filerev = self._filenode = None
1685 1686
1686 1687 if filelog is not None:
1687 1688 self._filelog = filelog
1688 1689 if ctx:
1689 1690 self._changectx = ctx
1690 1691
1691 1692 def __nonzero__(self):
1692 1693 return True
1693 1694
1694 1695 __bool__ = __nonzero__
1695 1696
1696 1697 def linkrev(self):
1697 1698 # linked to self._changectx no matter if file is modified or not
1698 1699 return self.rev()
1699 1700
1700 1701 def parents(self):
1701 1702 '''return parent filectxs, following copies if necessary'''
1702 1703 def filenode(ctx, path):
1703 1704 return ctx._manifest.get(path, nullid)
1704 1705
1705 1706 path = self._path
1706 1707 fl = self._filelog
1707 1708 pcl = self._changectx._parents
1708 1709 renamed = self.renamed()
1709 1710
1710 1711 if renamed:
1711 1712 pl = [renamed + (None,)]
1712 1713 else:
1713 1714 pl = [(path, filenode(pcl[0], path), fl)]
1714 1715
1715 1716 for pc in pcl[1:]:
1716 1717 pl.append((path, filenode(pc, path), fl))
1717 1718
1718 1719 return [self._parentfilectx(p, fileid=n, filelog=l)
1719 1720 for p, n, l in pl if n != nullid]
1720 1721
1721 1722 def children(self):
1722 1723 return []
1723 1724
1724 1725 class workingfilectx(committablefilectx):
1725 1726 """A workingfilectx object makes access to data related to a particular
1726 1727 file in the working directory convenient."""
1727 1728 def __init__(self, repo, path, filelog=None, workingctx=None):
1728 1729 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1729 1730
1730 1731 @propertycache
1731 1732 def _changectx(self):
1732 1733 return workingctx(self._repo)
1733 1734
1734 1735 def data(self):
1735 1736 return self._repo.wread(self._path)
1736 1737 def renamed(self):
1737 1738 rp = self._repo.dirstate.copied(self._path)
1738 1739 if not rp:
1739 1740 return None
1740 1741 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1741 1742
1742 1743 def size(self):
1743 1744 return self._repo.wvfs.lstat(self._path).st_size
1744 1745 def date(self):
1745 1746 t, tz = self._changectx.date()
1746 1747 try:
1747 1748 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1748 1749 except OSError as err:
1749 1750 if err.errno != errno.ENOENT:
1750 1751 raise
1751 1752 return (t, tz)
1752 1753
1753 1754 def exists(self):
1754 1755 return self._repo.wvfs.exists(self._path)
1755 1756
1756 1757 def lexists(self):
1757 1758 return self._repo.wvfs.lexists(self._path)
1758 1759
1759 1760 def audit(self):
1760 1761 return self._repo.wvfs.audit(self._path)
1761 1762
1762 1763 def cmp(self, fctx):
1763 1764 """compare with other file context
1764 1765
1765 1766         returns True if different from fctx.
1766 1767 """
1767 1768 # fctx should be a filectx (not a workingfilectx)
1768 1769 # invert comparison to reuse the same code path
1769 1770 return fctx.cmp(self)
1770 1771
1771 1772 def remove(self, ignoremissing=False):
1772 1773 """wraps unlink for a repo's working directory"""
1773 1774 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1774 1775
1775 1776 def write(self, data, flags, backgroundclose=False, **kwargs):
1776 1777 """wraps repo.wwrite"""
1777 1778 self._repo.wwrite(self._path, data, flags,
1778 1779 backgroundclose=backgroundclose,
1779 1780 **kwargs)
1780 1781
1781 1782 def markcopied(self, src):
1782 1783 """marks this file a copy of `src`"""
1783 1784 if self._repo.dirstate[self._path] in "nma":
1784 1785 self._repo.dirstate.copy(src, self._path)
1785 1786
1786 1787 def clearunknown(self):
1787 1788 """Removes conflicting items in the working directory so that
1788 1789 ``write()`` can be called successfully.
1789 1790 """
1790 1791 wvfs = self._repo.wvfs
1791 1792 f = self._path
1792 1793 wvfs.audit(f)
1793 1794 if wvfs.isdir(f) and not wvfs.islink(f):
1794 1795 wvfs.rmtree(f, forcibly=True)
1795 1796 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1796 1797 for p in reversed(list(util.finddirs(f))):
1797 1798 if wvfs.isfileorlink(p):
1798 1799 wvfs.unlink(p)
1799 1800 break
1800 1801
1801 1802 def setflags(self, l, x):
1802 1803 self._repo.wvfs.setflags(self._path, l, x)
1803 1804
1804 1805 class overlayworkingctx(committablectx):
1805 1806 """Wraps another mutable context with a write-back cache that can be
1806 1807 converted into a commit context.
1807 1808
1808 1809 self._cache[path] maps to a dict with keys: {
1809 1810 'exists': bool?
1810 1811 'date': date?
1811 1812 'data': str?
1812 1813 'flags': str?
1813 1814 'copied': str? (path or None)
1814 1815 }
1815 1816     If `exists` is True, `flags` and `date` must be non-None. If `exists` is
1816 1817     False, the file was deleted.
1817 1818 """
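    # Illustrative sketch (comment only, not part of the class): a cache
    # entry for a file written in memory might look like
    #
    #     self._cache['foo'] = {
    #         'exists': True,
    #         'data': 'new contents\n',
    #         'date': dateutil.makedate(),
    #         'flags': '',
    #         'copied': None,
    #     }
    #
    # while a deleted file is recorded simply with 'exists' set to False.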
1818 1819
1819 1820 def __init__(self, repo):
1820 1821 super(overlayworkingctx, self).__init__(repo)
1821 1822 self.clean()
1822 1823
1823 1824 def setbase(self, wrappedctx):
1824 1825 self._wrappedctx = wrappedctx
1825 1826 self._parents = [wrappedctx]
1826 1827 # Drop old manifest cache as it is now out of date.
1827 1828 # This is necessary when, e.g., rebasing several nodes with one
1828 1829 # ``overlayworkingctx`` (e.g. with --collapse).
1829 1830 util.clearcachedproperty(self, '_manifest')
1830 1831
1831 1832 def data(self, path):
1832 1833 if self.isdirty(path):
1833 1834 if self._cache[path]['exists']:
1834 1835 if self._cache[path]['data']:
1835 1836 return self._cache[path]['data']
1836 1837 else:
1837 1838                     # Must fall back here, too, because we only set flags.
1838 1839 return self._wrappedctx[path].data()
1839 1840 else:
1840 1841 raise error.ProgrammingError("No such file or directory: %s" %
1841 1842 path)
1842 1843 else:
1843 1844 return self._wrappedctx[path].data()
1844 1845
1845 1846 @propertycache
1846 1847 def _manifest(self):
1847 1848 parents = self.parents()
1848 1849 man = parents[0].manifest().copy()
1849 1850
1850 1851 flag = self._flagfunc
1851 1852 for path in self.added():
1852 1853 man[path] = addednodeid
1853 1854 man.setflag(path, flag(path))
1854 1855 for path in self.modified():
1855 1856 man[path] = modifiednodeid
1856 1857 man.setflag(path, flag(path))
1857 1858 for path in self.removed():
1858 1859 del man[path]
1859 1860 return man
1860 1861
1861 1862 @propertycache
1862 1863 def _flagfunc(self):
1863 1864 def f(path):
1864 1865 return self._cache[path]['flags']
1865 1866 return f
1866 1867
1867 1868 def files(self):
1868 1869 return sorted(self.added() + self.modified() + self.removed())
1869 1870
1870 1871 def modified(self):
1871 1872 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1872 1873 self._existsinparent(f)]
1873 1874
1874 1875 def added(self):
1875 1876 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1876 1877 not self._existsinparent(f)]
1877 1878
1878 1879 def removed(self):
1879 1880 return [f for f in self._cache.keys() if
1880 1881 not self._cache[f]['exists'] and self._existsinparent(f)]
1881 1882
1882 1883 def isinmemory(self):
1883 1884 return True
1884 1885
1885 1886 def filedate(self, path):
1886 1887 if self.isdirty(path):
1887 1888 return self._cache[path]['date']
1888 1889 else:
1889 1890 return self._wrappedctx[path].date()
1890 1891
1891 1892 def markcopied(self, path, origin):
1892 1893 if self.isdirty(path):
1893 1894 self._cache[path]['copied'] = origin
1894 1895 else:
1895 1896 raise error.ProgrammingError('markcopied() called on clean context')
1896 1897
1897 1898 def copydata(self, path):
1898 1899 if self.isdirty(path):
1899 1900 return self._cache[path]['copied']
1900 1901 else:
1901 1902 raise error.ProgrammingError('copydata() called on clean context')
1902 1903
1903 1904 def flags(self, path):
1904 1905 if self.isdirty(path):
1905 1906 if self._cache[path]['exists']:
1906 1907 return self._cache[path]['flags']
1907 1908 else:
1908 1909 raise error.ProgrammingError("No such file or directory: %s" %
1909 1910                                              path)
1910 1911 else:
1911 1912 return self._wrappedctx[path].flags()
1912 1913
1913 1914 def _existsinparent(self, path):
1914 1915 try:
1915 1916             # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1916 1917 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1917 1918 # with an ``exists()`` function.
1918 1919 self._wrappedctx[path]
1919 1920 return True
1920 1921 except error.ManifestLookupError:
1921 1922 return False
1922 1923
1923 1924 def _auditconflicts(self, path):
1924 1925 """Replicates conflict checks done by wvfs.write().
1925 1926
1926 1927 Since we never write to the filesystem and never call `applyupdates` in
1927 1928 IMM, we'll never check that a path is actually writable -- e.g., because
1928 1929 it adds `a/foo`, but `a` is actually a file in the other commit.
1929 1930 """
1930 1931 def fail(path, component):
1931 1932 # p1() is the base and we're receiving "writes" for p2()'s
1932 1933 # files.
1933 1934 if 'l' in self.p1()[component].flags():
1934 1935 raise error.Abort("error: %s conflicts with symlink %s "
1935 1936 "in %s." % (path, component,
1936 1937 self.p1().rev()))
1937 1938 else:
1938 1939 raise error.Abort("error: '%s' conflicts with file '%s' in "
1939 1940 "%s." % (path, component,
1940 1941 self.p1().rev()))
1941 1942
1942 1943 # Test that each new directory to be created to write this path from p2
1943 1944 # is not a file in p1.
1944 1945 components = path.split('/')
1945 1946         for i in pycompat.xrange(len(components)):
1946 1947 component = "/".join(components[0:i])
1947 1948 if component in self.p1():
1948 1949 fail(path, component)
1949 1950
1950 1951 # Test the other direction -- that this path from p2 isn't a directory
1951 1952         # in p1 (test that p1 doesn't have any paths matching `path/*`).
1952 1953 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1953 1954 matches = self.p1().manifest().matches(match)
1954 1955 if len(matches) > 0:
1955 1956 if len(matches) == 1 and matches.keys()[0] == path:
1956 1957 return
1957 1958 raise error.Abort("error: file '%s' cannot be written because "
1958 1959                           "'%s/' is a folder in %s (containing %d "
1959 1960 "entries: %s)"
1960 1961 % (path, path, self.p1(), len(matches),
1961 1962 ', '.join(matches.keys())))
1962 1963
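    # Illustrative sketch (comment only): with a base commit whose manifest
    # contains a file 'a', the first loop above rejects an in-memory write
    # of 'a/foo'; conversely, with a base containing 'dir/f', the reverse
    # check rejects a write of 'dir'. For example (file names hypothetical):
    #
    #     wctx = overlayworkingctx(repo)
    #     wctx.setbase(repo['.'])        # '.' contains a file named 'a'
    #     wctx.write('a/foo', 'data')    # raises error.Abort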
1963 1964 def write(self, path, data, flags='', **kwargs):
1964 1965 if data is None:
1965 1966 raise error.ProgrammingError("data must be non-None")
1966 1967 self._auditconflicts(path)
1967 1968 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1968 1969 flags=flags)
1969 1970
1970 1971 def setflags(self, path, l, x):
1971 1972 self._markdirty(path, exists=True, date=dateutil.makedate(),
1972 1973 flags=(l and 'l' or '') + (x and 'x' or ''))
1973 1974
1974 1975 def remove(self, path):
1975 1976 self._markdirty(path, exists=False)
1976 1977
1977 1978 def exists(self, path):
1978 1979 """exists behaves like `lexists`, but needs to follow symlinks and
1979 1980 return False if they are broken.
1980 1981 """
1981 1982 if self.isdirty(path):
1982 1983 # If this path exists and is a symlink, "follow" it by calling
1983 1984 # exists on the destination path.
1984 1985 if (self._cache[path]['exists'] and
1985 1986 'l' in self._cache[path]['flags']):
1986 1987 return self.exists(self._cache[path]['data'].strip())
1987 1988 else:
1988 1989 return self._cache[path]['exists']
1989 1990
1990 1991 return self._existsinparent(path)
1991 1992
1992 1993 def lexists(self, path):
1993 1994 """lexists returns True if the path exists"""
1994 1995 if self.isdirty(path):
1995 1996 return self._cache[path]['exists']
1996 1997
1997 1998 return self._existsinparent(path)
1998 1999
1999 2000 def size(self, path):
2000 2001 if self.isdirty(path):
2001 2002 if self._cache[path]['exists']:
2002 2003 return len(self._cache[path]['data'])
2003 2004 else:
2004 2005 raise error.ProgrammingError("No such file or directory: %s" %
2005 2006                                              path)
2006 2007 return self._wrappedctx[path].size()
2007 2008
2008 2009 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2009 2010 user=None, editor=None):
2010 2011 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2011 2012 committed.
2012 2013
2013 2014 ``text`` is the commit message.
2014 2015 ``parents`` (optional) are rev numbers.
2015 2016 """
2016 2017         # Default parents to the wrapped context's if not passed.
2017 2018 if parents is None:
2018 2019 parents = self._wrappedctx.parents()
2019 2020 if len(parents) == 1:
2020 2021 parents = (parents[0], None)
2021 2022
2022 2023 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2023 2024 if parents[1] is None:
2024 2025 parents = (self._repo[parents[0]], None)
2025 2026 else:
2026 2027 parents = (self._repo[parents[0]], self._repo[parents[1]])
2027 2028
2028 2029 files = self._cache.keys()
2029 2030 def getfile(repo, memctx, path):
2030 2031 if self._cache[path]['exists']:
2031 2032 return memfilectx(repo, memctx, path,
2032 2033 self._cache[path]['data'],
2033 2034 'l' in self._cache[path]['flags'],
2034 2035 'x' in self._cache[path]['flags'],
2035 2036 self._cache[path]['copied'])
2036 2037 else:
2037 2038 # Returning None, but including the path in `files`, is
2038 2039 # necessary for memctx to register a deletion.
2039 2040 return None
2040 2041 return memctx(self._repo, parents, text, files, getfile, date=date,
2041 2042 extra=extra, user=user, branch=branch, editor=editor)
2042 2043
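    # Illustrative usage sketch (comment only; assumes `repo` is an open
    # localrepository and the overlay already holds some in-memory writes):
    #
    #     wctx = overlayworkingctx(repo)
    #     wctx.setbase(repo['.'])
    #     wctx.write('foo', 'new contents\n')
    #     mctx = wctx.tomemctx('in-memory commit', user='someone')
    #     newnode = mctx.commit()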
2043 2044 def isdirty(self, path):
2044 2045 return path in self._cache
2045 2046
2046 2047 def isempty(self):
2047 2048 # We need to discard any keys that are actually clean before the empty
2048 2049 # commit check.
2049 2050 self._compact()
2050 2051 return len(self._cache) == 0
2051 2052
2052 2053 def clean(self):
2053 2054 self._cache = {}
2054 2055
2055 2056 def _compact(self):
2056 2057 """Removes keys from the cache that are actually clean, by comparing
2057 2058 them with the underlying context.
2058 2059
2059 2060 This can occur during the merge process, e.g. by passing --tool :local
2060 2061 to resolve a conflict.
2061 2062 """
2062 2063 keys = []
2063 2064 for path in self._cache.keys():
2064 2065 cache = self._cache[path]
2065 2066 try:
2066 2067 underlying = self._wrappedctx[path]
2067 2068 if (underlying.data() == cache['data'] and
2068 2069 underlying.flags() == cache['flags']):
2069 2070 keys.append(path)
2070 2071 except error.ManifestLookupError:
2071 2072 # Path not in the underlying manifest (created).
2072 2073 continue
2073 2074
2074 2075 for path in keys:
2075 2076 del self._cache[path]
2076 2077 return keys
2077 2078
2078 2079 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2079 2080 self._cache[path] = {
2080 2081 'exists': exists,
2081 2082 'data': data,
2082 2083 'date': date,
2083 2084 'flags': flags,
2084 2085 'copied': None,
2085 2086 }
2086 2087
2087 2088 def filectx(self, path, filelog=None):
2088 2089 return overlayworkingfilectx(self._repo, path, parent=self,
2089 2090 filelog=filelog)
2090 2091
2091 2092 class overlayworkingfilectx(committablefilectx):
2092 2093     """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2093 2094 cache, which can be flushed through later by calling ``flush()``."""
2094 2095
2095 2096 def __init__(self, repo, path, filelog=None, parent=None):
2096 2097 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2097 2098 parent)
2098 2099 self._repo = repo
2099 2100 self._parent = parent
2100 2101 self._path = path
2101 2102
2102 2103 def cmp(self, fctx):
2103 2104 return self.data() != fctx.data()
2104 2105
2105 2106 def changectx(self):
2106 2107 return self._parent
2107 2108
2108 2109 def data(self):
2109 2110 return self._parent.data(self._path)
2110 2111
2111 2112 def date(self):
2112 2113 return self._parent.filedate(self._path)
2113 2114
2114 2115 def exists(self):
2115 2116 return self.lexists()
2116 2117
2117 2118 def lexists(self):
2118 2119 return self._parent.exists(self._path)
2119 2120
2120 2121 def renamed(self):
2121 2122 path = self._parent.copydata(self._path)
2122 2123 if not path:
2123 2124 return None
2124 2125 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2125 2126
2126 2127 def size(self):
2127 2128 return self._parent.size(self._path)
2128 2129
2129 2130 def markcopied(self, origin):
2130 2131 self._parent.markcopied(self._path, origin)
2131 2132
2132 2133 def audit(self):
2133 2134 pass
2134 2135
2135 2136 def flags(self):
2136 2137 return self._parent.flags(self._path)
2137 2138
2138 2139 def setflags(self, islink, isexec):
2139 2140 return self._parent.setflags(self._path, islink, isexec)
2140 2141
2141 2142 def write(self, data, flags, backgroundclose=False, **kwargs):
2142 2143 return self._parent.write(self._path, data, flags, **kwargs)
2143 2144
2144 2145 def remove(self, ignoremissing=False):
2145 2146 return self._parent.remove(self._path)
2146 2147
2147 2148 def clearunknown(self):
2148 2149 pass
2149 2150
2150 2151 class workingcommitctx(workingctx):
2151 2152 """A workingcommitctx object makes access to data related to
2152 2153 the revision being committed convenient.
2153 2154
2154 2155 This hides changes in the working directory, if they aren't
2155 2156 committed in this context.
2156 2157 """
2157 2158 def __init__(self, repo, changes,
2158 2159 text="", user=None, date=None, extra=None):
2159 2160 super(workingctx, self).__init__(repo, text, user, date, extra,
2160 2161 changes)
2161 2162
2162 2163 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2163 2164 """Return matched files only in ``self._status``
2164 2165
2165 2166 Uncommitted files appear "clean" via this context, even if
2166 2167 they aren't actually so in the working directory.
2167 2168 """
2168 2169 if clean:
2169 2170 clean = [f for f in self._manifest if f not in self._changedset]
2170 2171 else:
2171 2172 clean = []
2172 2173 return scmutil.status([f for f in self._status.modified if match(f)],
2173 2174 [f for f in self._status.added if match(f)],
2174 2175 [f for f in self._status.removed if match(f)],
2175 2176 [], [], [], clean)
2176 2177
2177 2178 @propertycache
2178 2179 def _changedset(self):
2179 2180 """Return the set of files changed in this context
2180 2181 """
2181 2182 changed = set(self._status.modified)
2182 2183 changed.update(self._status.added)
2183 2184 changed.update(self._status.removed)
2184 2185 return changed
2185 2186
2186 2187 def makecachingfilectxfn(func):
2187 2188 """Create a filectxfn that caches based on the path.
2188 2189
2189 2190 We can't use util.cachefunc because it uses all arguments as the cache
2190 2191 key and this creates a cycle since the arguments include the repo and
2191 2192 memctx.
2192 2193 """
2193 2194 cache = {}
2194 2195
2195 2196 def getfilectx(repo, memctx, path):
2196 2197 if path not in cache:
2197 2198 cache[path] = func(repo, memctx, path)
2198 2199 return cache[path]
2199 2200
2200 2201 return getfilectx
2201 2202
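# Illustrative sketch (comment only): wrapping an expensive callback so that
# repeated lookups of the same path during a commit hit the cache. Here
# `computedata` is a hypothetical helper standing in for whatever produces
# the file content:
#
#     def expensivefn(repo, memctx, path):
#         return memfilectx(repo, memctx, path, computedata(path))
#
#     cachedfn = makecachingfilectxfn(expensivefn)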
2202 2203 def memfilefromctx(ctx):
2203 2204 """Given a context return a memfilectx for ctx[path]
2204 2205
2205 2206 This is a convenience method for building a memctx based on another
2206 2207 context.
2207 2208 """
2208 2209 def getfilectx(repo, memctx, path):
2209 2210 fctx = ctx[path]
2210 2211 # this is weird but apparently we only keep track of one parent
2211 2212 # (why not only store that instead of a tuple?)
2212 2213 copied = fctx.renamed()
2213 2214 if copied:
2214 2215 copied = copied[0]
2215 2216 return memfilectx(repo, memctx, path, fctx.data(),
2216 2217 islink=fctx.islink(), isexec=fctx.isexec(),
2217 2218 copied=copied)
2218 2219
2219 2220 return getfilectx
2220 2221
2221 2222 def memfilefrompatch(patchstore):
2222 2223 """Given a patch (e.g. patchstore object) return a memfilectx
2223 2224
2224 2225 This is a convenience method for building a memctx based on a patchstore.
2225 2226 """
2226 2227 def getfilectx(repo, memctx, path):
2227 2228 data, mode, copied = patchstore.getfile(path)
2228 2229 if data is None:
2229 2230 return None
2230 2231 islink, isexec = mode
2231 2232 return memfilectx(repo, memctx, path, data, islink=islink,
2232 2233 isexec=isexec, copied=copied)
2233 2234
2234 2235 return getfilectx
2235 2236
2236 2237 class memctx(committablectx):
2237 2238 """Use memctx to perform in-memory commits via localrepo.commitctx().
2238 2239
2239 2240     Revision information is supplied at initialization time, while the
2240 2241     related file data is made available through a callback
2241 2242     mechanism. 'repo' is the current localrepo, 'parents' is a
2242 2243     sequence of two parent revision identifiers (pass None for every
2243 2244 missing parent), 'text' is the commit message and 'files' lists
2244 2245 names of files touched by the revision (normalized and relative to
2245 2246 repository root).
2246 2247
2247 2248 filectxfn(repo, memctx, path) is a callable receiving the
2248 2249 repository, the current memctx object and the normalized path of
2249 2250 requested file, relative to repository root. It is fired by the
2250 2251 commit function for every file in 'files', but calls order is
2251 2252 undefined. If the file is available in the revision being
2252 2253 committed (updated or added), filectxfn returns a memfilectx
2253 2254 object. If the file was removed, filectxfn return None for recent
2254 2255     object. If the file was removed, filectxfn returns None (in recent
2255 2256     Mercurial). Moved files are represented by marking the source file
2256 2257 memfilectx).
2257 2258
2258 2259 user receives the committer name and defaults to current
2259 2260 repository username, date is the commit date in any format
2260 2261 supported by dateutil.parsedate() and defaults to current date, extra
2261 2262 is a dictionary of metadata or is left empty.
2262 2263 """
2263 2264
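    # Illustrative usage sketch (comment only; assumes `repo` is an open
    # localrepository and the new commit should add a file 'foo'):
    #
    #     def filectxfn(repo, memctx, path):
    #         if path == 'foo':
    #             return memfilectx(repo, memctx, path, 'contents of foo\n')
    #         return None  # anything else listed in files is removed
    #
    #     ctx = memctx(repo, [repo['.'].node(), None], 'add foo', ['foo'],
    #                  filectxfn, user='someone <someone@example.com>')
    #     newnode = ctx.commit()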
2264 2265 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2265 2266 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2266 2267 # this field to determine what to do in filectxfn.
2267 2268 _returnnoneformissingfiles = True
2268 2269
2269 2270 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2270 2271 date=None, extra=None, branch=None, editor=False):
2271 2272 super(memctx, self).__init__(repo, text, user, date, extra)
2272 2273 self._rev = None
2273 2274 self._node = None
2274 2275 parents = [(p or nullid) for p in parents]
2275 2276 p1, p2 = parents
2276 2277 self._parents = [self._repo[p] for p in (p1, p2)]
2277 2278 files = sorted(set(files))
2278 2279 self._files = files
2279 2280 if branch is not None:
2280 2281 self._extra['branch'] = encoding.fromlocal(branch)
2281 2282 self.substate = {}
2282 2283
2283 2284 if isinstance(filectxfn, patch.filestore):
2284 2285 filectxfn = memfilefrompatch(filectxfn)
2285 2286 elif not callable(filectxfn):
2286 2287 # if store is not callable, wrap it in a function
2287 2288 filectxfn = memfilefromctx(filectxfn)
2288 2289
2289 2290 # memoizing increases performance for e.g. vcs convert scenarios.
2290 2291 self._filectxfn = makecachingfilectxfn(filectxfn)
2291 2292
2292 2293 if editor:
2293 2294 self._text = editor(self._repo, self, [])
2294 2295 self._repo.savecommitmessage(self._text)
2295 2296
2296 2297 def filectx(self, path, filelog=None):
2297 2298 """get a file context from the working directory
2298 2299
2299 2300 Returns None if file doesn't exist and should be removed."""
2300 2301 return self._filectxfn(self._repo, self, path)
2301 2302
2302 2303 def commit(self):
2303 2304 """commit context to the repo"""
2304 2305 return self._repo.commitctx(self)
2305 2306
2306 2307 @propertycache
2307 2308 def _manifest(self):
2308 2309 """generate a manifest based on the return values of filectxfn"""
2309 2310
2310 2311 # keep this simple for now; just worry about p1
2311 2312 pctx = self._parents[0]
2312 2313 man = pctx.manifest().copy()
2313 2314
2314 2315 for f in self._status.modified:
2315 2316 p1node = nullid
2316 2317 p2node = nullid
2317 2318 p = pctx[f].parents() # if file isn't in pctx, check p2?
2318 2319 if len(p) > 0:
2319 2320 p1node = p[0].filenode()
2320 2321 if len(p) > 1:
2321 2322 p2node = p[1].filenode()
2322 2323 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2323 2324
2324 2325 for f in self._status.added:
2325 2326 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2326 2327
2327 2328 for f in self._status.removed:
2328 2329 if f in man:
2329 2330 del man[f]
2330 2331
2331 2332 return man
2332 2333
2333 2334 @propertycache
2334 2335 def _status(self):
2335 2336 """Calculate exact status from ``files`` specified at construction
2336 2337 """
2337 2338 man1 = self.p1().manifest()
2338 2339 p2 = self._parents[1]
2339 2340 # "1 < len(self._parents)" can't be used for checking
2340 2341 # existence of the 2nd parent, because "memctx._parents" is
2341 2342         # explicitly initialized with a list whose length is always 2.
2342 2343 if p2.node() != nullid:
2343 2344 man2 = p2.manifest()
2344 2345 managing = lambda f: f in man1 or f in man2
2345 2346 else:
2346 2347 managing = lambda f: f in man1
2347 2348
2348 2349 modified, added, removed = [], [], []
2349 2350 for f in self._files:
2350 2351 if not managing(f):
2351 2352 added.append(f)
2352 2353 elif self[f]:
2353 2354 modified.append(f)
2354 2355 else:
2355 2356 removed.append(f)
2356 2357
2357 2358 return scmutil.status(modified, added, removed, [], [], [], [])
2358 2359
2359 2360 class memfilectx(committablefilectx):
2360 2361 """memfilectx represents an in-memory file to commit.
2361 2362
2362 2363 See memctx and committablefilectx for more details.
2363 2364 """
2364 2365 def __init__(self, repo, changectx, path, data, islink=False,
2365 2366 isexec=False, copied=None):
2366 2367 """
2367 2368 path is the normalized file path relative to repository root.
2368 2369 data is the file content as a string.
2369 2370 islink is True if the file is a symbolic link.
2370 2371 isexec is True if the file is executable.
2371 2372 copied is the source file path if current file was copied in the
2372 2373 revision being committed, or None."""
2373 2374 super(memfilectx, self).__init__(repo, path, None, changectx)
2374 2375 self._data = data
2375 2376 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2376 2377 self._copied = None
2377 2378 if copied:
2378 2379 self._copied = (copied, nullid)
2379 2380
2380 2381 def data(self):
2381 2382 return self._data
2382 2383
2383 2384 def remove(self, ignoremissing=False):
2384 2385 """wraps unlink for a repo's working directory"""
2385 2386 # need to figure out what to do here
2386 2387 del self._changectx[self._path]
2387 2388
2388 2389 def write(self, data, flags, **kwargs):
2389 2390 """wraps repo.wwrite"""
2390 2391 self._data = data
2391 2392
2392 2393 class overlayfilectx(committablefilectx):
2393 2394     """Like memfilectx, but takes an original filectx and optional parameters
2394 2395     to override parts of it. This is useful when fctx.data() is expensive
2395 2396     (e.g. the flag processor is costly) and raw data, flags, and filenode can
2396 2397     be reused (e.g. rebase, or a mode-only amend of a REVIDX_EXTSTORED file).
2397 2398 """
2398 2399
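    # Illustrative usage sketch (comment only): reuse an existing filectx's
    # raw data and filenode while changing only its flags, e.g. for a
    # mode-only amend:
    #
    #     fctx = repo['.']['foo']
    #     execfctx = overlayfilectx(fctx, flags='x')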
2399 2400 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2400 2401 copied=None, ctx=None):
2401 2402 """originalfctx: filecontext to duplicate
2402 2403
2403 2404 datafunc: None or a function to override data (file content). It is a
2404 2405 function to be lazy. path, flags, copied, ctx: None or overridden value
2405 2406
2406 2407 copied could be (path, rev), or False. copied could also be just path,
2407 2408 and will be converted to (path, nullid). This simplifies some callers.
2408 2409 """
2409 2410
2410 2411 if path is None:
2411 2412 path = originalfctx.path()
2412 2413 if ctx is None:
2413 2414 ctx = originalfctx.changectx()
2414 2415 ctxmatch = lambda: True
2415 2416 else:
2416 2417 ctxmatch = lambda: ctx == originalfctx.changectx()
2417 2418
2418 2419 repo = originalfctx.repo()
2419 2420 flog = originalfctx.filelog()
2420 2421 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2421 2422
2422 2423 if copied is None:
2423 2424 copied = originalfctx.renamed()
2424 2425 copiedmatch = lambda: True
2425 2426 else:
2426 2427 if copied and not isinstance(copied, tuple):
2427 2428 # repo._filecommit will recalculate copyrev so nullid is okay
2428 2429 copied = (copied, nullid)
2429 2430 copiedmatch = lambda: copied == originalfctx.renamed()
2430 2431
2431 2432 # When data, copied (could affect data), ctx (could affect filelog
2432 2433 # parents) are not overridden, rawdata, rawflags, and filenode may be
2433 2434 # reused (repo._filecommit should double check filelog parents).
2434 2435 #
2435 2436 # path, flags are not hashed in filelog (but in manifestlog) so they do
2436 2437         # not affect reusability here.
2437 2438 #
2438 2439         # If ctx or copied is overridden to the same value as in originalfctx,
2439 2440         # it is still considered reusable. originalfctx.renamed() may be a bit
2440 2441 # expensive so it's not called unless necessary. Assuming datafunc is
2441 2442 # always expensive, do not call it for this "reusable" test.
2442 2443 reusable = datafunc is None and ctxmatch() and copiedmatch()
2443 2444
2444 2445 if datafunc is None:
2445 2446 datafunc = originalfctx.data
2446 2447 if flags is None:
2447 2448 flags = originalfctx.flags()
2448 2449
2449 2450 self._datafunc = datafunc
2450 2451 self._flags = flags
2451 2452 self._copied = copied
2452 2453
2453 2454 if reusable:
2454 2455 # copy extra fields from originalfctx
2455 2456 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2456 2457 for attr_ in attrs:
2457 2458 if util.safehasattr(originalfctx, attr_):
2458 2459 setattr(self, attr_, getattr(originalfctx, attr_))
2459 2460
2460 2461 def data(self):
2461 2462 return self._datafunc()
2462 2463
2463 2464 class metadataonlyctx(committablectx):
2464 2465     """Like memctx, but it reuses the manifest of a different commit.
2465 2466 Intended to be used by lightweight operations that are creating
2466 2467 metadata-only changes.
2467 2468
2468 2469 Revision information is supplied at initialization time. 'repo' is the
2469 2470     current localrepo, 'ctx' is the original revision whose manifest we're
2470 2471     reusing, 'parents' is a sequence of two parent revision identifiers (pass
2471 2472     None for every missing parent), 'text' is the commit message.
2472 2473
2473 2474 user receives the committer name and defaults to current repository
2474 2475 username, date is the commit date in any format supported by
2475 2476 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2476 2477 metadata or is left empty.
2477 2478 """
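    # Illustrative usage sketch (comment only): reword the commit message of
    # the working directory parent without touching its manifest:
    #
    #     old = repo['.']
    #     new = metadataonlyctx(repo, old, text='reworded commit message')
    #     newnode = new.commit()
    #
    # A history-rewriting caller would then remap the old node to `newnode`.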
2478 2479 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2479 2480 date=None, extra=None, editor=False):
2480 2481 if text is None:
2481 2482 text = originalctx.description()
2482 2483 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2483 2484 self._rev = None
2484 2485 self._node = None
2485 2486 self._originalctx = originalctx
2486 2487 self._manifestnode = originalctx.manifestnode()
2487 2488 if parents is None:
2488 2489 parents = originalctx.parents()
2489 2490 else:
2490 2491 parents = [repo[p] for p in parents if p is not None]
2491 2492 parents = parents[:]
2492 2493 while len(parents) < 2:
2493 2494 parents.append(repo[nullid])
2494 2495 p1, p2 = self._parents = parents
2495 2496
2496 2497 # sanity check to ensure that the reused manifest parents are
2497 2498 # manifests of our commit parents
2498 2499 mp1, mp2 = self.manifestctx().parents
2499 2500         if p1.node() != nullid and p1.manifestnode() != mp1:
2500 2501 raise RuntimeError('can\'t reuse the manifest: '
2501 2502 'its p1 doesn\'t match the new ctx p1')
2502 2503         if p2.node() != nullid and p2.manifestnode() != mp2:
2503 2504 raise RuntimeError('can\'t reuse the manifest: '
2504 2505 'its p2 doesn\'t match the new ctx p2')
2505 2506
2506 2507 self._files = originalctx.files()
2507 2508 self.substate = {}
2508 2509
2509 2510 if editor:
2510 2511 self._text = editor(self._repo, self, [])
2511 2512 self._repo.savecommitmessage(self._text)
2512 2513
2513 2514 def manifestnode(self):
2514 2515 return self._manifestnode
2515 2516
2516 2517 @property
2517 2518 def _manifestctx(self):
2518 2519 return self._repo.manifestlog[self._manifestnode]
2519 2520
2520 2521 def filectx(self, path, filelog=None):
2521 2522 return self._originalctx.filectx(path, filelog=filelog)
2522 2523
2523 2524 def commit(self):
2524 2525 """commit context to the repo"""
2525 2526 return self._repo.commitctx(self)
2526 2527
2527 2528 @property
2528 2529 def _manifest(self):
2529 2530 return self._originalctx.manifest()
2530 2531
2531 2532 @propertycache
2532 2533 def _status(self):
2533 2534 """Calculate exact status from ``files`` specified in the ``origctx``
2534 2535 and parents manifests.
2535 2536 """
2536 2537 man1 = self.p1().manifest()
2537 2538 p2 = self._parents[1]
2538 2539 # "1 < len(self._parents)" can't be used for checking
2539 2540 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2540 2541         # explicitly initialized with a list whose length is always 2.
2541 2542 if p2.node() != nullid:
2542 2543 man2 = p2.manifest()
2543 2544 managing = lambda f: f in man1 or f in man2
2544 2545 else:
2545 2546 managing = lambda f: f in man1
2546 2547
2547 2548 modified, added, removed = [], [], []
2548 2549 for f in self._files:
2549 2550 if not managing(f):
2550 2551 added.append(f)
2551 2552 elif f in self:
2552 2553 modified.append(f)
2553 2554 else:
2554 2555 removed.append(f)
2555 2556
2556 2557 return scmutil.status(modified, added, removed, [], [], [], [])
2557 2558
2558 2559 class arbitraryfilectx(object):
2559 2560 """Allows you to use filectx-like functions on a file in an arbitrary
2560 2561 location on disk, possibly not in the working directory.
2561 2562 """
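    # Illustrative usage sketch (comment only; the path is hypothetical):
    #
    #     fctx = arbitraryfilectx('/tmp/merge-backup', repo=repo)
    #     if fctx.cmp(repo[None]['foo']):
    #         repo.ui.status('backup differs from working copy of foo\n')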
2562 2563 def __init__(self, path, repo=None):
2563 2564 # Repo is optional because contrib/simplemerge uses this class.
2564 2565 self._repo = repo
2565 2566 self._path = path
2566 2567
2567 2568 def cmp(self, fctx):
2568 2569 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2569 2570 # path if either side is a symlink.
2570 2571 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2571 2572 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2572 2573 # Add a fast-path for merge if both sides are disk-backed.
2573 2574 # Note that filecmp uses the opposite return values (True if same)
2574 2575 # from our cmp functions (True if different).
2575 2576 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2576 2577 return self.data() != fctx.data()
2577 2578
2578 2579 def path(self):
2579 2580 return self._path
2580 2581
2581 2582 def flags(self):
2582 2583 return ''
2583 2584
2584 2585 def data(self):
2585 2586 return util.readfile(self._path)
2586 2587
2587 2588 def decodeddata(self):
2588 2589 with open(self._path, "rb") as f:
2589 2590 return f.read()
2590 2591
2591 2592 def remove(self):
2592 2593 util.unlink(self._path)
2593 2594
2594 2595 def write(self, data, flags, **kwargs):
2595 2596 assert not flags
2596 2597         with open(self._path, "wb") as f:
2597 2598 f.write(data)