context: move flags overrides from committablectx to workingctx...
Martin von Zweigbergk
r42478:491855ea default
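In outline, this change deletes the flags machinery (_buildflagfunc, the _flagfunc property cache and the flags() override) from committablectx and redefines it, unchanged, on workingctx. A minimal sketch of the resulting class shape, with names taken from the diff below and method bodies elided:

class committablectx(basectx):
    ...  # no longer defines _buildflagfunc, _flagfunc or flags()

class workingctx(committablectx):
    def _buildflagfunc(self):
        # fallback for computing file flags when the filesystem does not
        # support them, reconstructed from dirstate copies and the parent
        # manifest(s)
        ...

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        # prefer flags from a cached manifest, else fall back to _flagfunc
        ...

Since committablectx no longer overrides flags(), a committable context other than workingctx falls back to basectx.flags() unless it defines its own override.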
@@ -1,2571 +1,2571 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58
59 59 def __init__(self, repo):
60 60 self._repo = repo
61 61
62 62 def __bytes__(self):
63 63 return short(self.node())
64 64
65 65 __str__ = encoding.strmethod(__bytes__)
66 66
67 67 def __repr__(self):
68 68 return r"<%s %s>" % (type(self).__name__, str(self))
69 69
70 70 def __eq__(self, other):
71 71 try:
72 72 return type(self) == type(other) and self._rev == other._rev
73 73 except AttributeError:
74 74 return False
75 75
76 76 def __ne__(self, other):
77 77 return not (self == other)
78 78
79 79 def __contains__(self, key):
80 80 return key in self._manifest
81 81
82 82 def __getitem__(self, key):
83 83 return self.filectx(key)
84 84
85 85 def __iter__(self):
86 86 return iter(self._manifest)
87 87
88 88 def _buildstatusmanifest(self, status):
89 89 """Builds a manifest that includes the given status results, if this is
90 90 a working copy context. For non-working copy contexts, it just returns
91 91 the normal manifest."""
92 92 return self.manifest()
93 93
94 94 def _matchstatus(self, other, match):
95 95 """This internal method provides a way for child objects to override the
96 96 match operator.
97 97 """
98 98 return match
99 99
100 100 def _buildstatus(self, other, s, match, listignored, listclean,
101 101 listunknown):
102 102 """build a status with respect to another context"""
103 103 # Load earliest manifest first for caching reasons. More specifically,
104 104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 107 # delta to what's in the cache. So that's one full reconstruction + one
108 108 # delta application.
109 109 mf2 = None
110 110 if self.rev() is not None and self.rev() < other.rev():
111 111 mf2 = self._buildstatusmanifest(s)
112 112 mf1 = other._buildstatusmanifest(s)
113 113 if mf2 is None:
114 114 mf2 = self._buildstatusmanifest(s)
115 115
116 116 modified, added = [], []
117 117 removed = []
118 118 clean = []
119 119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 120 deletedset = set(deleted)
121 121 d = mf1.diff(mf2, match=match, clean=listclean)
122 122 for fn, value in d.iteritems():
123 123 if fn in deletedset:
124 124 continue
125 125 if value is None:
126 126 clean.append(fn)
127 127 continue
128 128 (node1, flag1), (node2, flag2) = value
129 129 if node1 is None:
130 130 added.append(fn)
131 131 elif node2 is None:
132 132 removed.append(fn)
133 133 elif flag1 != flag2:
134 134 modified.append(fn)
135 135 elif node2 not in wdirfilenodeids:
136 136 # When comparing files between two commits, we save time by
137 137 # not comparing the file contents when the nodeids differ.
138 138 # Note that this means we incorrectly report a reverted change
139 139 # to a file as a modification.
140 140 modified.append(fn)
141 141 elif self[fn].cmp(other[fn]):
142 142 modified.append(fn)
143 143 else:
144 144 clean.append(fn)
145 145
146 146 if removed:
147 147 # need to filter files if they are already reported as removed
148 148 unknown = [fn for fn in unknown if fn not in mf1 and
149 149 (not match or match(fn))]
150 150 ignored = [fn for fn in ignored if fn not in mf1 and
151 151 (not match or match(fn))]
152 152 # if they're deleted, don't report them as removed
153 153 removed = [fn for fn in removed if fn not in deletedset]
154 154
155 155 return scmutil.status(modified, added, removed, deleted, unknown,
156 156 ignored, clean)
157 157
158 158 @propertycache
159 159 def substate(self):
160 160 return subrepoutil.state(self, self._repo.ui)
161 161
162 162 def subrev(self, subpath):
163 163 return self.substate[subpath][1]
164 164
165 165 def rev(self):
166 166 return self._rev
167 167 def node(self):
168 168 return self._node
169 169 def hex(self):
170 170 return hex(self.node())
171 171 def manifest(self):
172 172 return self._manifest
173 173 def manifestctx(self):
174 174 return self._manifestctx
175 175 def repo(self):
176 176 return self._repo
177 177 def phasestr(self):
178 178 return phases.phasenames[self.phase()]
179 179 def mutable(self):
180 180 return self.phase() > phases.public
181 181
182 182 def matchfileset(self, expr, badfn=None):
183 183 return fileset.match(self, expr, badfn=badfn)
184 184
185 185 def obsolete(self):
186 186 """True if the changeset is obsolete"""
187 187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 188
189 189 def extinct(self):
190 190 """True if the changeset is extinct"""
191 191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 192
193 193 def orphan(self):
194 194 """True if the changeset is not obsolete, but its ancestor is"""
195 195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 196
197 197 def phasedivergent(self):
198 198 """True if the changeset tries to be a successor of a public changeset
199 199
200 200 Only non-public and non-obsolete changesets may be phase-divergent.
201 201 """
202 202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 203
204 204 def contentdivergent(self):
205 205 """Is a successor of a changeset with multiple possible successor sets
206 206
207 207 Only non-public and non-obsolete changesets may be content-divergent.
208 208 """
209 209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 210
211 211 def isunstable(self):
212 212 """True if the changeset is either orphan, phase-divergent or
213 213 content-divergent"""
214 214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 215
216 216 def instabilities(self):
217 217 """return the list of instabilities affecting this changeset.
218 218
219 219 Instabilities are returned as strings. possible values are:
220 220 - orphan,
221 221 - phase-divergent,
222 222 - content-divergent.
223 223 """
224 224 instabilities = []
225 225 if self.orphan():
226 226 instabilities.append('orphan')
227 227 if self.phasedivergent():
228 228 instabilities.append('phase-divergent')
229 229 if self.contentdivergent():
230 230 instabilities.append('content-divergent')
231 231 return instabilities
232 232
233 233 def parents(self):
234 234 """return contexts for each parent changeset"""
235 235 return self._parents
236 236
237 237 def p1(self):
238 238 return self._parents[0]
239 239
240 240 def p2(self):
241 241 parents = self._parents
242 242 if len(parents) == 2:
243 243 return parents[1]
244 244 return self._repo[nullrev]
245 245
246 246 def _fileinfo(self, path):
247 247 if r'_manifest' in self.__dict__:
248 248 try:
249 249 return self._manifest[path], self._manifest.flags(path)
250 250 except KeyError:
251 251 raise error.ManifestLookupError(self._node, path,
252 252 _('not found in manifest'))
253 253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 254 if path in self._manifestdelta:
255 255 return (self._manifestdelta[path],
256 256 self._manifestdelta.flags(path))
257 257 mfl = self._repo.manifestlog
258 258 try:
259 259 node, flag = mfl[self._changeset.manifest].find(path)
260 260 except KeyError:
261 261 raise error.ManifestLookupError(self._node, path,
262 262 _('not found in manifest'))
263 263
264 264 return node, flag
265 265
266 266 def filenode(self, path):
267 267 return self._fileinfo(path)[0]
268 268
269 269 def flags(self, path):
270 270 try:
271 271 return self._fileinfo(path)[1]
272 272 except error.LookupError:
273 273 return ''
274 274
275 275 @propertycache
276 276 def _copies(self):
277 277 p1copies = {}
278 278 p2copies = {}
279 279 p1 = self.p1()
280 280 p2 = self.p2()
281 281 narrowmatch = self._repo.narrowmatch()
282 282 for dst in self.files():
283 283 if not narrowmatch(dst) or dst not in self:
284 284 continue
285 285 copied = self[dst].renamed()
286 286 if not copied:
287 287 continue
288 288 src, srcnode = copied
289 289 if src in p1 and p1[src].filenode() == srcnode:
290 290 p1copies[dst] = src
291 291 elif src in p2 and p2[src].filenode() == srcnode:
292 292 p2copies[dst] = src
293 293 return p1copies, p2copies
294 294 def p1copies(self):
295 295 return self._copies[0]
296 296 def p2copies(self):
297 297 return self._copies[1]
298 298
299 299 def sub(self, path, allowcreate=True):
300 300 '''return a subrepo for the stored revision of path, never wdir()'''
301 301 return subrepo.subrepo(self, path, allowcreate=allowcreate)
302 302
303 303 def nullsub(self, path, pctx):
304 304 return subrepo.nullsubrepo(self, path, pctx)
305 305
306 306 def workingsub(self, path):
307 307 '''return a subrepo for the stored revision, or wdir if this is a wdir
308 308 context.
309 309 '''
310 310 return subrepo.subrepo(self, path, allowwdir=True)
311 311
312 312 def match(self, pats=None, include=None, exclude=None, default='glob',
313 313 listsubrepos=False, badfn=None):
314 314 r = self._repo
315 315 return matchmod.match(r.root, r.getcwd(), pats,
316 316 include, exclude, default,
317 317 auditor=r.nofsauditor, ctx=self,
318 318 listsubrepos=listsubrepos, badfn=badfn)
319 319
320 320 def diff(self, ctx2=None, match=None, changes=None, opts=None,
321 321 losedatafn=None, pathfn=None, copy=None,
322 322 copysourcematch=None, hunksfilterfn=None):
323 323 """Returns a diff generator for the given contexts and matcher"""
324 324 if ctx2 is None:
325 325 ctx2 = self.p1()
326 326 if ctx2 is not None:
327 327 ctx2 = self._repo[ctx2]
328 328 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
329 329 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
330 330 copy=copy, copysourcematch=copysourcematch,
331 331 hunksfilterfn=hunksfilterfn)
332 332
333 333 def dirs(self):
334 334 return self._manifest.dirs()
335 335
336 336 def hasdir(self, dir):
337 337 return self._manifest.hasdir(dir)
338 338
339 339 def status(self, other=None, match=None, listignored=False,
340 340 listclean=False, listunknown=False, listsubrepos=False):
341 341 """return status of files between two nodes or node and working
342 342 directory.
343 343
344 344 If other is None, compare this node with working directory.
345 345
346 346 returns (modified, added, removed, deleted, unknown, ignored, clean)
347 347 """
348 348
349 349 ctx1 = self
350 350 ctx2 = self._repo[other]
351 351
352 352 # This next code block is, admittedly, fragile logic that tests for
353 353 # reversing the contexts and wouldn't need to exist if it weren't for
354 354 # the fast (and common) code path of comparing the working directory
355 355 # with its first parent.
356 356 #
357 357 # What we're aiming for here is the ability to call:
358 358 #
359 359 # workingctx.status(parentctx)
360 360 #
361 361 # If we always built the manifest for each context and compared those,
362 362 # then we'd be done. But the special case of the above call means we
363 363 # just copy the manifest of the parent.
364 364 reversed = False
365 365 if (not isinstance(ctx1, changectx)
366 366 and isinstance(ctx2, changectx)):
367 367 reversed = True
368 368 ctx1, ctx2 = ctx2, ctx1
369 369
370 370 match = self._repo.narrowmatch(match)
371 371 match = ctx2._matchstatus(ctx1, match)
372 372 r = scmutil.status([], [], [], [], [], [], [])
373 373 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
374 374 listunknown)
375 375
376 376 if reversed:
377 377 # Reverse added and removed. Clear deleted, unknown and ignored as
378 378 # these make no sense to reverse.
379 379 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
380 380 r.clean)
381 381
382 382 if listsubrepos:
383 383 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
384 384 try:
385 385 rev2 = ctx2.subrev(subpath)
386 386 except KeyError:
387 387 # A subrepo that existed in node1 was deleted between
388 388 # node1 and node2 (inclusive). Thus, ctx2's substate
389 389 # won't contain that subpath. The best we can do is ignore it.
390 390 rev2 = None
391 391 submatch = matchmod.subdirmatcher(subpath, match)
392 392 s = sub.status(rev2, match=submatch, ignored=listignored,
393 393 clean=listclean, unknown=listunknown,
394 394 listsubrepos=True)
395 395 for rfiles, sfiles in zip(r, s):
396 396 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
397 397
398 398 for l in r:
399 399 l.sort()
400 400
401 401 return r
402 402
403 403 class changectx(basectx):
404 404 """A changecontext object makes access to data related to a particular
405 405 changeset convenient. It represents a read-only context already present in
406 406 the repo."""
407 407 def __init__(self, repo, rev, node):
408 408 super(changectx, self).__init__(repo)
409 409 self._rev = rev
410 410 self._node = node
411 411
412 412 def __hash__(self):
413 413 try:
414 414 return hash(self._rev)
415 415 except AttributeError:
416 416 return id(self)
417 417
418 418 def __nonzero__(self):
419 419 return self._rev != nullrev
420 420
421 421 __bool__ = __nonzero__
422 422
423 423 @propertycache
424 424 def _changeset(self):
425 425 return self._repo.changelog.changelogrevision(self.rev())
426 426
427 427 @propertycache
428 428 def _manifest(self):
429 429 return self._manifestctx.read()
430 430
431 431 @property
432 432 def _manifestctx(self):
433 433 return self._repo.manifestlog[self._changeset.manifest]
434 434
435 435 @propertycache
436 436 def _manifestdelta(self):
437 437 return self._manifestctx.readdelta()
438 438
439 439 @propertycache
440 440 def _parents(self):
441 441 repo = self._repo
442 442 p1, p2 = repo.changelog.parentrevs(self._rev)
443 443 if p2 == nullrev:
444 444 return [repo[p1]]
445 445 return [repo[p1], repo[p2]]
446 446
447 447 def changeset(self):
448 448 c = self._changeset
449 449 return (
450 450 c.manifest,
451 451 c.user,
452 452 c.date,
453 453 c.files,
454 454 c.description,
455 455 c.extra,
456 456 )
457 457 def manifestnode(self):
458 458 return self._changeset.manifest
459 459
460 460 def user(self):
461 461 return self._changeset.user
462 462 def date(self):
463 463 return self._changeset.date
464 464 def files(self):
465 465 return self._changeset.files
466 466 @propertycache
467 467 def _copies(self):
468 468 source = self._repo.ui.config('experimental', 'copies.read-from')
469 469 p1copies = self._changeset.p1copies
470 470 p2copies = self._changeset.p2copies
471 471 # If config says to get copy metadata only from changeset, then return
472 472 # that, defaulting to {} if there was no copy metadata.
473 473 # In compatibility mode, we return copy data from the changeset if
474 474 # it was recorded there, and otherwise we fall back to getting it from
475 475 # the filelogs (below).
476 476 if (source == 'changeset-only' or
477 477 (source == 'compatibility' and p1copies is not None)):
478 478 return p1copies or {}, p2copies or {}
479 479
480 480 # Otherwise (config said to read only from filelog, or we are in
481 481 # compatibility mode and there is no data in the changeset), we get
482 482 # the copy metadata from the filelogs.
483 483 return super(changectx, self)._copies
484 484 def description(self):
485 485 return self._changeset.description
486 486 def branch(self):
487 487 return encoding.tolocal(self._changeset.extra.get("branch"))
488 488 def closesbranch(self):
489 489 return 'close' in self._changeset.extra
490 490 def extra(self):
491 491 """Return a dict of extra information."""
492 492 return self._changeset.extra
493 493 def tags(self):
494 494 """Return a list of byte tag names"""
495 495 return self._repo.nodetags(self._node)
496 496 def bookmarks(self):
497 497 """Return a list of byte bookmark names."""
498 498 return self._repo.nodebookmarks(self._node)
499 499 def phase(self):
500 500 return self._repo._phasecache.phase(self._repo, self._rev)
501 501 def hidden(self):
502 502 return self._rev in repoview.filterrevs(self._repo, 'visible')
503 503
504 504 def isinmemory(self):
505 505 return False
506 506
507 507 def children(self):
508 508 """return list of changectx contexts for each child changeset.
509 509
510 510 This returns only the immediate child changesets. Use descendants() to
511 511 recursively walk children.
512 512 """
513 513 c = self._repo.changelog.children(self._node)
514 514 return [self._repo[x] for x in c]
515 515
516 516 def ancestors(self):
517 517 for a in self._repo.changelog.ancestors([self._rev]):
518 518 yield self._repo[a]
519 519
520 520 def descendants(self):
521 521 """Recursively yield all children of the changeset.
522 522
523 523 For just the immediate children, use children()
524 524 """
525 525 for d in self._repo.changelog.descendants([self._rev]):
526 526 yield self._repo[d]
527 527
528 528 def filectx(self, path, fileid=None, filelog=None):
529 529 """get a file context from this changeset"""
530 530 if fileid is None:
531 531 fileid = self.filenode(path)
532 532 return filectx(self._repo, path, fileid=fileid,
533 533 changectx=self, filelog=filelog)
534 534
535 535 def ancestor(self, c2, warn=False):
536 536 """return the "best" ancestor context of self and c2
537 537
538 538 If there are multiple candidates, it will show a message and check
539 539 merge.preferancestor configuration before falling back to the
540 540 revlog ancestor."""
541 541 # deal with workingctxs
542 542 n2 = c2._node
543 543 if n2 is None:
544 544 n2 = c2._parents[0]._node
545 545 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
546 546 if not cahs:
547 547 anc = nullid
548 548 elif len(cahs) == 1:
549 549 anc = cahs[0]
550 550 else:
551 551 # experimental config: merge.preferancestor
552 552 for r in self._repo.ui.configlist('merge', 'preferancestor'):
553 553 try:
554 554 ctx = scmutil.revsymbol(self._repo, r)
555 555 except error.RepoLookupError:
556 556 continue
557 557 anc = ctx.node()
558 558 if anc in cahs:
559 559 break
560 560 else:
561 561 anc = self._repo.changelog.ancestor(self._node, n2)
562 562 if warn:
563 563 self._repo.ui.status(
564 564 (_("note: using %s as ancestor of %s and %s\n") %
565 565 (short(anc), short(self._node), short(n2))) +
566 566 ''.join(_(" alternatively, use --config "
567 567 "merge.preferancestor=%s\n") %
568 568 short(n) for n in sorted(cahs) if n != anc))
569 569 return self._repo[anc]
570 570
571 571 def isancestorof(self, other):
572 572 """True if this changeset is an ancestor of other"""
573 573 return self._repo.changelog.isancestorrev(self._rev, other._rev)
574 574
575 575 def walk(self, match):
576 576 '''Generates matching file names.'''
577 577
578 578 # Wrap match.bad method to have message with nodeid
579 579 def bad(fn, msg):
580 580 # The manifest doesn't know about subrepos, so don't complain about
581 581 # paths into valid subrepos.
582 582 if any(fn == s or fn.startswith(s + '/')
583 583 for s in self.substate):
584 584 return
585 585 match.bad(fn, _('no such file in rev %s') % self)
586 586
587 587 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
588 588 return self._manifest.walk(m)
589 589
590 590 def matches(self, match):
591 591 return self.walk(match)
592 592
593 593 class basefilectx(object):
594 594 """A filecontext object represents the common logic for its children:
595 595 filectx: read-only access to a filerevision that is already present
596 596 in the repo,
597 597 workingfilectx: a filecontext that represents files from the working
598 598 directory,
599 599 memfilectx: a filecontext that represents files in-memory,
600 600 """
601 601 @propertycache
602 602 def _filelog(self):
603 603 return self._repo.file(self._path)
604 604
605 605 @propertycache
606 606 def _changeid(self):
607 607 if r'_changectx' in self.__dict__:
608 608 return self._changectx.rev()
609 609 elif r'_descendantrev' in self.__dict__:
610 610 # this file context was created from a revision with a known
611 611 # descendant, we can (lazily) correct for linkrev aliases
612 612 return self._adjustlinkrev(self._descendantrev)
613 613 else:
614 614 return self._filelog.linkrev(self._filerev)
615 615
616 616 @propertycache
617 617 def _filenode(self):
618 618 if r'_fileid' in self.__dict__:
619 619 return self._filelog.lookup(self._fileid)
620 620 else:
621 621 return self._changectx.filenode(self._path)
622 622
623 623 @propertycache
624 624 def _filerev(self):
625 625 return self._filelog.rev(self._filenode)
626 626
627 627 @propertycache
628 628 def _repopath(self):
629 629 return self._path
630 630
631 631 def __nonzero__(self):
632 632 try:
633 633 self._filenode
634 634 return True
635 635 except error.LookupError:
636 636 # file is missing
637 637 return False
638 638
639 639 __bool__ = __nonzero__
640 640
641 641 def __bytes__(self):
642 642 try:
643 643 return "%s@%s" % (self.path(), self._changectx)
644 644 except error.LookupError:
645 645 return "%s@???" % self.path()
646 646
647 647 __str__ = encoding.strmethod(__bytes__)
648 648
649 649 def __repr__(self):
650 650 return r"<%s %s>" % (type(self).__name__, str(self))
651 651
652 652 def __hash__(self):
653 653 try:
654 654 return hash((self._path, self._filenode))
655 655 except AttributeError:
656 656 return id(self)
657 657
658 658 def __eq__(self, other):
659 659 try:
660 660 return (type(self) == type(other) and self._path == other._path
661 661 and self._filenode == other._filenode)
662 662 except AttributeError:
663 663 return False
664 664
665 665 def __ne__(self, other):
666 666 return not (self == other)
667 667
668 668 def filerev(self):
669 669 return self._filerev
670 670 def filenode(self):
671 671 return self._filenode
672 672 @propertycache
673 673 def _flags(self):
674 674 return self._changectx.flags(self._path)
675 675 def flags(self):
676 676 return self._flags
677 677 def filelog(self):
678 678 return self._filelog
679 679 def rev(self):
680 680 return self._changeid
681 681 def linkrev(self):
682 682 return self._filelog.linkrev(self._filerev)
683 683 def node(self):
684 684 return self._changectx.node()
685 685 def hex(self):
686 686 return self._changectx.hex()
687 687 def user(self):
688 688 return self._changectx.user()
689 689 def date(self):
690 690 return self._changectx.date()
691 691 def files(self):
692 692 return self._changectx.files()
693 693 def description(self):
694 694 return self._changectx.description()
695 695 def branch(self):
696 696 return self._changectx.branch()
697 697 def extra(self):
698 698 return self._changectx.extra()
699 699 def phase(self):
700 700 return self._changectx.phase()
701 701 def phasestr(self):
702 702 return self._changectx.phasestr()
703 703 def obsolete(self):
704 704 return self._changectx.obsolete()
705 705 def instabilities(self):
706 706 return self._changectx.instabilities()
707 707 def manifest(self):
708 708 return self._changectx.manifest()
709 709 def changectx(self):
710 710 return self._changectx
711 711 def renamed(self):
712 712 return self._copied
713 713 def copysource(self):
714 714 return self._copied and self._copied[0]
715 715 def repo(self):
716 716 return self._repo
717 717 def size(self):
718 718 return len(self.data())
719 719
720 720 def path(self):
721 721 return self._path
722 722
723 723 def isbinary(self):
724 724 try:
725 725 return stringutil.binary(self.data())
726 726 except IOError:
727 727 return False
728 728 def isexec(self):
729 729 return 'x' in self.flags()
730 730 def islink(self):
731 731 return 'l' in self.flags()
732 732
733 733 def isabsent(self):
734 734 """whether this filectx represents a file not in self._changectx
735 735
736 736 This is mainly for merge code to detect change/delete conflicts. This is
737 737 expected to be True for all subclasses of basectx."""
738 738 return False
739 739
740 740 _customcmp = False
741 741 def cmp(self, fctx):
742 742 """compare with other file context
743 743
744 744 returns True if different than fctx.
745 745 """
746 746 if fctx._customcmp:
747 747 return fctx.cmp(self)
748 748
749 749 if self._filenode is None:
750 750 raise error.ProgrammingError(
751 751 'filectx.cmp() must be reimplemented if not backed by revlog')
752 752
753 753 if fctx._filenode is None:
754 754 if self._repo._encodefilterpats:
755 755 # can't rely on size() because wdir content may be decoded
756 756 return self._filelog.cmp(self._filenode, fctx.data())
757 757 if self.size() - 4 == fctx.size():
758 758 # size() can match:
759 759 # if file data starts with '\1\n', empty metadata block is
760 760 # prepended, which adds 4 bytes to filelog.size().
761 761 return self._filelog.cmp(self._filenode, fctx.data())
762 762 if self.size() == fctx.size():
763 763 # size() matches: need to compare content
764 764 return self._filelog.cmp(self._filenode, fctx.data())
765 765
766 766 # size() differs
767 767 return True
768 768
769 769 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
770 770 """return the first ancestor of <srcrev> introducing <fnode>
771 771
772 772 If the linkrev of the file revision does not point to an ancestor of
773 773 srcrev, we'll walk down the ancestors until we find one introducing
774 774 this file revision.
775 775
776 776 :srcrev: the changeset revision we search ancestors from
777 777 :inclusive: if true, the src revision will also be checked
778 778 :stoprev: an optional revision to stop the walk at. If no introduction
779 779 of this file content could be found before this floor
780 780 revision, the function will returns "None" and stops its
781 781 iteration.
782 782 """
783 783 repo = self._repo
784 784 cl = repo.unfiltered().changelog
785 785 mfl = repo.manifestlog
786 786 # fetch the linkrev
787 787 lkr = self.linkrev()
788 788 if srcrev == lkr:
789 789 return lkr
790 790 # hack to reuse ancestor computation when searching for renames
791 791 memberanc = getattr(self, '_ancestrycontext', None)
792 792 iteranc = None
793 793 if srcrev is None:
794 794 # wctx case, used by workingfilectx during mergecopy
795 795 revs = [p.rev() for p in self._repo[None].parents()]
796 796 inclusive = True # we skipped the real (revless) source
797 797 else:
798 798 revs = [srcrev]
799 799 if memberanc is None:
800 800 memberanc = iteranc = cl.ancestors(revs, lkr,
801 801 inclusive=inclusive)
802 802 # check if this linkrev is an ancestor of srcrev
803 803 if lkr not in memberanc:
804 804 if iteranc is None:
805 805 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
806 806 fnode = self._filenode
807 807 path = self._path
808 808 for a in iteranc:
809 809 if stoprev is not None and a < stoprev:
810 810 return None
811 811 ac = cl.read(a) # get changeset data (we avoid object creation)
812 812 if path in ac[3]: # checking the 'files' field.
813 813 # The file has been touched, check if the content is
814 814 # similar to the one we search for.
815 815 if fnode == mfl[ac[0]].readfast().get(path):
816 816 return a
817 817 # In theory, we should never get out of that loop without a result.
818 818 # But if the manifest uses a buggy file revision (not a child of the
819 819 # one it replaces) we could. Such a buggy situation will likely
820 820 # result in a crash somewhere else at some point.
821 821 return lkr
822 822
823 823 def isintroducedafter(self, changelogrev):
824 824 """True if a filectx has been introduced after a given floor revision
825 825 """
826 826 if self.linkrev() >= changelogrev:
827 827 return True
828 828 introrev = self._introrev(stoprev=changelogrev)
829 829 if introrev is None:
830 830 return False
831 831 return introrev >= changelogrev
832 832
833 833 def introrev(self):
834 834 """return the rev of the changeset which introduced this file revision
835 835
836 836 This method is different from linkrev because it takes into account the
837 837 changeset the filectx was created from. It ensures the returned
838 838 revision is one of its ancestors. This prevents bugs from
839 839 'linkrev-shadowing' when a file revision is used by multiple
840 840 changesets.
841 841 """
842 842 return self._introrev()
843 843
844 844 def _introrev(self, stoprev=None):
845 845 """
846 846 Same as `introrev`, but with an extra argument to limit changelog
847 847 iteration range in some internal use cases.
848 848
849 849 If `stoprev` is set, the `introrev` will not be searched past that
850 850 `stoprev` revision and "None" might be returned. This is useful to
851 851 limit the iteration range.
852 852 """
853 853 toprev = None
854 854 attrs = vars(self)
855 855 if r'_changeid' in attrs:
856 856 # We have a cached value already
857 857 toprev = self._changeid
858 858 elif r'_changectx' in attrs:
859 859 # We know which changelog entry we are coming from
860 860 toprev = self._changectx.rev()
861 861
862 862 if toprev is not None:
863 863 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
864 864 elif r'_descendantrev' in attrs:
865 865 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
866 866 # be nice and cache the result of the computation
867 867 if introrev is not None:
868 868 self._changeid = introrev
869 869 return introrev
870 870 else:
871 871 return self.linkrev()
872 872
873 873 def introfilectx(self):
874 874 """Return filectx having identical contents, but pointing to the
875 875 changeset revision where this filectx was introduced"""
876 876 introrev = self.introrev()
877 877 if self.rev() == introrev:
878 878 return self
879 879 return self.filectx(self.filenode(), changeid=introrev)
880 880
881 881 def _parentfilectx(self, path, fileid, filelog):
882 882 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
883 883 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
884 884 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
885 885 # If self is associated with a changeset (probably explicitly
886 886 # fed), ensure the created filectx is associated with a
887 887 # changeset that is an ancestor of self.changectx.
888 888 # This lets us later use _adjustlinkrev to get a correct link.
889 889 fctx._descendantrev = self.rev()
890 890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 891 elif r'_descendantrev' in vars(self):
892 892 # Otherwise propagate _descendantrev if we have one associated.
893 893 fctx._descendantrev = self._descendantrev
894 894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 895 return fctx
896 896
897 897 def parents(self):
898 898 _path = self._path
899 899 fl = self._filelog
900 900 parents = self._filelog.parents(self._filenode)
901 901 pl = [(_path, node, fl) for node in parents if node != nullid]
902 902
903 903 r = fl.renamed(self._filenode)
904 904 if r:
905 905 # - In the simple rename case, both parents are nullid, pl is empty.
906 906 # - In case of merge, only one of the parents is nullid and should
907 907 # be replaced with the rename information. This parent is -always-
908 908 # the first one.
909 909 #
910 910 # As nullid parents have always been filtered out in the previous list
911 911 # comprehension, inserting at 0 will always result in replacing the
912 912 # first nullid parent with the rename information.
913 913 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
914 914
915 915 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
916 916
917 917 def p1(self):
918 918 return self.parents()[0]
919 919
920 920 def p2(self):
921 921 p = self.parents()
922 922 if len(p) == 2:
923 923 return p[1]
924 924 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925 925
926 926 def annotate(self, follow=False, skiprevs=None, diffopts=None):
927 927 """Returns a list of annotateline objects for each line in the file
928 928
929 929 - line.fctx is the filectx of the node where that line was last changed
930 930 - line.lineno is the line number at the first appearance in the managed
931 931 file
932 932 - line.text is the data on that line (including newline character)
933 933 """
934 934 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
935 935
936 936 def parents(f):
937 937 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
938 938 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
939 939 # from the topmost introrev (= srcrev) down to p.linkrev() if it
940 940 # isn't an ancestor of the srcrev.
941 941 f._changeid
942 942 pl = f.parents()
943 943
944 944 # Don't return renamed parents if we aren't following.
945 945 if not follow:
946 946 pl = [p for p in pl if p.path() == f.path()]
947 947
948 948 # renamed filectx won't have a filelog yet, so set it
949 949 # from the cache to save time
950 950 for p in pl:
951 951 if not r'_filelog' in p.__dict__:
952 952 p._filelog = getlog(p.path())
953 953
954 954 return pl
955 955
956 956 # use linkrev to find the first changeset where self appeared
957 957 base = self.introfilectx()
958 958 if getattr(base, '_ancestrycontext', None) is None:
959 959 cl = self._repo.changelog
960 960 if base.rev() is None:
961 961 # wctx is not inclusive, but works because _ancestrycontext
962 962 # is used to test filelog revisions
963 963 ac = cl.ancestors([p.rev() for p in base.parents()],
964 964 inclusive=True)
965 965 else:
966 966 ac = cl.ancestors([base.rev()], inclusive=True)
967 967 base._ancestrycontext = ac
968 968
969 969 return dagop.annotate(base, parents, skiprevs=skiprevs,
970 970 diffopts=diffopts)
971 971
972 972 def ancestors(self, followfirst=False):
973 973 visit = {}
974 974 c = self
975 975 if followfirst:
976 976 cut = 1
977 977 else:
978 978 cut = None
979 979
980 980 while True:
981 981 for parent in c.parents()[:cut]:
982 982 visit[(parent.linkrev(), parent.filenode())] = parent
983 983 if not visit:
984 984 break
985 985 c = visit.pop(max(visit))
986 986 yield c
987 987
988 988 def decodeddata(self):
989 989 """Returns `data()` after running repository decoding filters.
990 990
991 991 This is often equivalent to how the data would be expressed on disk.
992 992 """
993 993 return self._repo.wwritedata(self.path(), self.data())
994 994
995 995 class filectx(basefilectx):
996 996 """A filecontext object makes access to data related to a particular
997 997 filerevision convenient."""
998 998 def __init__(self, repo, path, changeid=None, fileid=None,
999 999 filelog=None, changectx=None):
1000 1000 """changeid must be a revision number, if specified.
1001 1001 fileid can be a file revision or node."""
1002 1002 self._repo = repo
1003 1003 self._path = path
1004 1004
1005 1005 assert (changeid is not None
1006 1006 or fileid is not None
1007 1007 or changectx is not None), (
1008 1008 "bad args: changeid=%r, fileid=%r, changectx=%r"
1009 1009 % (changeid, fileid, changectx))
1010 1010
1011 1011 if filelog is not None:
1012 1012 self._filelog = filelog
1013 1013
1014 1014 if changeid is not None:
1015 1015 self._changeid = changeid
1016 1016 if changectx is not None:
1017 1017 self._changectx = changectx
1018 1018 if fileid is not None:
1019 1019 self._fileid = fileid
1020 1020
1021 1021 @propertycache
1022 1022 def _changectx(self):
1023 1023 try:
1024 1024 return self._repo[self._changeid]
1025 1025 except error.FilteredRepoLookupError:
1026 1026 # Linkrev may point to any revision in the repository. When the
1027 1027 # repository is filtered this may lead to `filectx` trying to build
1028 1028 # `changectx` for a filtered revision. In such a case we fall back to
1029 1029 # creating `changectx` on the unfiltered version of the repository.
1030 1030 # This fallback should not be an issue because `changectx` from
1031 1031 # `filectx` are not used in complex operations that care about
1032 1032 # filtering.
1033 1033 #
1034 1034 # This fallback is a cheap and dirty fix that prevents several
1035 1035 # crashes. It does not ensure the behavior is correct. However the
1036 1036 # behavior was not correct before filtering either, and "incorrect
1037 1037 # behavior" is seen as better than "crash".
1038 1038 #
1039 1039 # Linkrevs have several serious troubles with filtering that are
1040 1040 # complicated to solve. Proper handling of the issue here should be
1041 1041 # considered once solving the linkrev issues is on the table.
1042 1042 return self._repo.unfiltered()[self._changeid]
1043 1043
1044 1044 def filectx(self, fileid, changeid=None):
1045 1045 '''opens an arbitrary revision of the file without
1046 1046 opening a new filelog'''
1047 1047 return filectx(self._repo, self._path, fileid=fileid,
1048 1048 filelog=self._filelog, changeid=changeid)
1049 1049
1050 1050 def rawdata(self):
1051 1051 return self._filelog.revision(self._filenode, raw=True)
1052 1052
1053 1053 def rawflags(self):
1054 1054 """low-level revlog flags"""
1055 1055 return self._filelog.flags(self._filerev)
1056 1056
1057 1057 def data(self):
1058 1058 try:
1059 1059 return self._filelog.read(self._filenode)
1060 1060 except error.CensoredNodeError:
1061 1061 if self._repo.ui.config("censor", "policy") == "ignore":
1062 1062 return ""
1063 1063 raise error.Abort(_("censored node: %s") % short(self._filenode),
1064 1064 hint=_("set censor.policy to ignore errors"))
1065 1065
1066 1066 def size(self):
1067 1067 return self._filelog.size(self._filerev)
1068 1068
1069 1069 @propertycache
1070 1070 def _copied(self):
1071 1071 """check if file was actually renamed in this changeset revision
1072 1072
1073 1073 If a rename is logged in the file revision, we report the copy for the
1074 1074 changeset only if the file revision's linkrev points back to the changeset
1075 1075 in question or both changeset parents contain different file revisions.
1076 1076 """
1077 1077
1078 1078 renamed = self._filelog.renamed(self._filenode)
1079 1079 if not renamed:
1080 1080 return None
1081 1081
1082 1082 if self.rev() == self.linkrev():
1083 1083 return renamed
1084 1084
1085 1085 name = self.path()
1086 1086 fnode = self._filenode
1087 1087 for p in self._changectx.parents():
1088 1088 try:
1089 1089 if fnode == p.filenode(name):
1090 1090 return None
1091 1091 except error.LookupError:
1092 1092 pass
1093 1093 return renamed
1094 1094
1095 1095 def children(self):
1096 1096 # hard for renames
1097 1097 c = self._filelog.children(self._filenode)
1098 1098 return [filectx(self._repo, self._path, fileid=x,
1099 1099 filelog=self._filelog) for x in c]
1100 1100
1101 1101 class committablectx(basectx):
1102 1102 """A committablectx object provides common functionality for a context that
1103 1103 wants the ability to commit, e.g. workingctx or memctx."""
1104 1104 def __init__(self, repo, text="", user=None, date=None, extra=None,
1105 1105 changes=None):
1106 1106 super(committablectx, self).__init__(repo)
1107 1107 self._rev = None
1108 1108 self._node = None
1109 1109 self._text = text
1110 1110 if date:
1111 1111 self._date = dateutil.parsedate(date)
1112 1112 if user:
1113 1113 self._user = user
1114 1114 if changes:
1115 1115 self._status = changes
1116 1116
1117 1117 self._extra = {}
1118 1118 if extra:
1119 1119 self._extra = extra.copy()
1120 1120 if 'branch' not in self._extra:
1121 1121 try:
1122 1122 branch = encoding.fromlocal(self._repo.dirstate.branch())
1123 1123 except UnicodeDecodeError:
1124 1124 raise error.Abort(_('branch name not in UTF-8!'))
1125 1125 self._extra['branch'] = branch
1126 1126 if self._extra['branch'] == '':
1127 1127 self._extra['branch'] = 'default'
1128 1128
1129 1129 def __bytes__(self):
1130 1130 return bytes(self._parents[0]) + "+"
1131 1131
1132 1132 __str__ = encoding.strmethod(__bytes__)
1133 1133
1134 1134 def __nonzero__(self):
1135 1135 return True
1136 1136
1137 1137 __bool__ = __nonzero__
1138 1138
1139 def _buildflagfunc(self):
1140 # Create a fallback function for getting file flags when the
1141 # filesystem doesn't support them
1142
1143 copiesget = self._repo.dirstate.copies().get
1144 parents = self.parents()
1145 if len(parents) < 2:
1146 # when we have one parent, it's easy: copy from parent
1147 man = parents[0].manifest()
1148 def func(f):
1149 f = copiesget(f, f)
1150 return man.flags(f)
1151 else:
1152 # merges are tricky: we try to reconstruct the unstored
1153 # result from the merge (issue1802)
1154 p1, p2 = parents
1155 pa = p1.ancestor(p2)
1156 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1157
1158 def func(f):
1159 f = copiesget(f, f) # may be wrong for merges with copies
1160 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1161 if fl1 == fl2:
1162 return fl1
1163 if fl1 == fla:
1164 return fl2
1165 if fl2 == fla:
1166 return fl1
1167 return '' # punt for conflicts
1168
1169 return func
1170
1171 @propertycache
1172 def _flagfunc(self):
1173 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1174
1175 1139 @propertycache
1176 1140 def _status(self):
1177 1141 return self._repo.status()
1178 1142
1179 1143 @propertycache
1180 1144 def _user(self):
1181 1145 return self._repo.ui.username()
1182 1146
1183 1147 @propertycache
1184 1148 def _date(self):
1185 1149 ui = self._repo.ui
1186 1150 date = ui.configdate('devel', 'default-date')
1187 1151 if date is None:
1188 1152 date = dateutil.makedate()
1189 1153 return date
1190 1154
1191 1155 def subrev(self, subpath):
1192 1156 return None
1193 1157
1194 1158 def manifestnode(self):
1195 1159 return None
1196 1160 def user(self):
1197 1161 return self._user or self._repo.ui.username()
1198 1162 def date(self):
1199 1163 return self._date
1200 1164 def description(self):
1201 1165 return self._text
1202 1166 def files(self):
1203 1167 return sorted(self._status.modified + self._status.added +
1204 1168 self._status.removed)
1205 1169 def modified(self):
1206 1170 return self._status.modified
1207 1171 def added(self):
1208 1172 return self._status.added
1209 1173 def removed(self):
1210 1174 return self._status.removed
1211 1175 def deleted(self):
1212 1176 return self._status.deleted
1213 1177 def branch(self):
1214 1178 return encoding.tolocal(self._extra['branch'])
1215 1179 def closesbranch(self):
1216 1180 return 'close' in self._extra
1217 1181 def extra(self):
1218 1182 return self._extra
1219 1183
1220 1184 def isinmemory(self):
1221 1185 return False
1222 1186
1223 1187 def tags(self):
1224 1188 return []
1225 1189
1226 1190 def bookmarks(self):
1227 1191 b = []
1228 1192 for p in self.parents():
1229 1193 b.extend(p.bookmarks())
1230 1194 return b
1231 1195
1232 1196 def phase(self):
1233 1197 phase = phases.draft # default phase to draft
1234 1198 for p in self.parents():
1235 1199 phase = max(phase, p.phase())
1236 1200 return phase
1237 1201
1238 1202 def hidden(self):
1239 1203 return False
1240 1204
1241 1205 def children(self):
1242 1206 return []
1243 1207
1244 def flags(self, path):
1245 if r'_manifest' in self.__dict__:
1246 try:
1247 return self._manifest.flags(path)
1248 except KeyError:
1249 return ''
1250
1251 try:
1252 return self._flagfunc(path)
1253 except OSError:
1254 return ''
1255
1256 1208 def ancestor(self, c2):
1257 1209 """return the "best" ancestor context of self and c2"""
1258 1210 return self._parents[0].ancestor(c2) # punt on two parents for now
1259 1211
1260 1212 def walk(self, match):
1261 1213 '''Generates matching file names.'''
1262 1214 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1263 1215 subrepos=sorted(self.substate),
1264 1216 unknown=True, ignored=False))
1265 1217
1266 1218 def matches(self, match):
1267 1219 match = self._repo.narrowmatch(match)
1268 1220 ds = self._repo.dirstate
1269 1221 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1270 1222
1271 1223 def ancestors(self):
1272 1224 for p in self._parents:
1273 1225 yield p
1274 1226 for a in self._repo.changelog.ancestors(
1275 1227 [p.rev() for p in self._parents]):
1276 1228 yield self._repo[a]
1277 1229
1278 1230 def markcommitted(self, node):
1279 1231 """Perform post-commit cleanup necessary after committing this ctx
1280 1232
1281 1233 Specifically, this updates backing stores this working context
1282 1234 wraps to reflect the fact that the changes reflected by this
1283 1235 workingctx have been committed. For example, it marks
1284 1236 modified and added files as normal in the dirstate.
1285 1237
1286 1238 """
1287 1239
1288 1240 with self._repo.dirstate.parentchange():
1289 1241 for f in self.modified() + self.added():
1290 1242 self._repo.dirstate.normal(f)
1291 1243 for f in self.removed():
1292 1244 self._repo.dirstate.drop(f)
1293 1245 self._repo.dirstate.setparents(node)
1294 1246
1295 1247 # write changes out explicitly, because nesting wlock at
1296 1248 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1297 1249 # from immediately doing so for subsequent changing files
1298 1250 self._repo.dirstate.write(self._repo.currenttransaction())
1299 1251
1300 1252 def dirty(self, missing=False, merge=True, branch=True):
1301 1253 return False
1302 1254
1303 1255 class workingctx(committablectx):
1304 1256 """A workingctx object makes access to data related to
1305 1257 the current working directory convenient.
1306 1258 date - any valid date string or (unixtime, offset), or None.
1307 1259 user - username string, or None.
1308 1260 extra - a dictionary of extra values, or None.
1309 1261 changes - a list of file lists as returned by localrepo.status()
1310 1262 or None to use the repository status.
1311 1263 """
1312 1264 def __init__(self, repo, text="", user=None, date=None, extra=None,
1313 1265 changes=None):
1314 1266 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1315 1267
1316 1268 def __iter__(self):
1317 1269 d = self._repo.dirstate
1318 1270 for f in d:
1319 1271 if d[f] != 'r':
1320 1272 yield f
1321 1273
1322 1274 def __contains__(self, key):
1323 1275 return self._repo.dirstate[key] not in "?r"
1324 1276
1325 1277 def hex(self):
1326 1278 return wdirhex
1327 1279
1328 1280 @propertycache
1329 1281 def _parents(self):
1330 1282 p = self._repo.dirstate.parents()
1331 1283 if p[1] == nullid:
1332 1284 p = p[:-1]
1333 1285 # use unfiltered repo to delay/avoid loading obsmarkers
1334 1286 unfi = self._repo.unfiltered()
1335 1287 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1336 1288
1337 1289 def _fileinfo(self, path):
1338 1290 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1339 1291 self._manifest
1340 1292 return super(workingctx, self)._fileinfo(path)
1341 1293
1294 def _buildflagfunc(self):
1295 # Create a fallback function for getting file flags when the
1296 # filesystem doesn't support them
1297
1298 copiesget = self._repo.dirstate.copies().get
1299 parents = self.parents()
1300 if len(parents) < 2:
1301 # when we have one parent, it's easy: copy from parent
1302 man = parents[0].manifest()
1303 def func(f):
1304 f = copiesget(f, f)
1305 return man.flags(f)
1306 else:
1307 # merges are tricky: we try to reconstruct the unstored
1308 # result from the merge (issue1802)
1309 p1, p2 = parents
1310 pa = p1.ancestor(p2)
1311 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1312
1313 def func(f):
1314 f = copiesget(f, f) # may be wrong for merges with copies
1315 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1316 if fl1 == fl2:
1317 return fl1
1318 if fl1 == fla:
1319 return fl2
1320 if fl2 == fla:
1321 return fl1
1322 return '' # punt for conflicts
1323
1324 return func
1325
1326 @propertycache
1327 def _flagfunc(self):
1328 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1329
1330 def flags(self, path):
1331 if r'_manifest' in self.__dict__:
1332 try:
1333 return self._manifest.flags(path)
1334 except KeyError:
1335 return ''
1336
1337 try:
1338 return self._flagfunc(path)
1339 except OSError:
1340 return ''
1341
1342 1342 def filectx(self, path, filelog=None):
1343 1343 """get a file context from the working directory"""
1344 1344 return workingfilectx(self._repo, path, workingctx=self,
1345 1345 filelog=filelog)
1346 1346
1347 1347 def dirty(self, missing=False, merge=True, branch=True):
1348 1348 "check whether a working directory is modified"
1349 1349 # check subrepos first
1350 1350 for s in sorted(self.substate):
1351 1351 if self.sub(s).dirty(missing=missing):
1352 1352 return True
1353 1353 # check current working dir
1354 1354 return ((merge and self.p2()) or
1355 1355 (branch and self.branch() != self.p1().branch()) or
1356 1356 self.modified() or self.added() or self.removed() or
1357 1357 (missing and self.deleted()))
1358 1358
1359 1359 def add(self, list, prefix=""):
1360 1360 with self._repo.wlock():
1361 1361 ui, ds = self._repo.ui, self._repo.dirstate
1362 1362 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1363 1363 rejected = []
1364 1364 lstat = self._repo.wvfs.lstat
1365 1365 for f in list:
1366 1366 # ds.pathto() returns an absolute file when this is invoked from
1367 1367 # the keyword extension. That gets flagged as non-portable on
1368 1368 # Windows, since it contains the drive letter and colon.
1369 1369 scmutil.checkportable(ui, os.path.join(prefix, f))
1370 1370 try:
1371 1371 st = lstat(f)
1372 1372 except OSError:
1373 1373 ui.warn(_("%s does not exist!\n") % uipath(f))
1374 1374 rejected.append(f)
1375 1375 continue
1376 1376 limit = ui.configbytes('ui', 'large-file-limit')
1377 1377 if limit != 0 and st.st_size > limit:
1378 1378 ui.warn(_("%s: up to %d MB of RAM may be required "
1379 1379 "to manage this file\n"
1380 1380 "(use 'hg revert %s' to cancel the "
1381 1381 "pending addition)\n")
1382 1382 % (f, 3 * st.st_size // 1000000, uipath(f)))
1383 1383 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1384 1384 ui.warn(_("%s not added: only files and symlinks "
1385 1385 "supported currently\n") % uipath(f))
1386 1386 rejected.append(f)
1387 1387 elif ds[f] in 'amn':
1388 1388 ui.warn(_("%s already tracked!\n") % uipath(f))
1389 1389 elif ds[f] == 'r':
1390 1390 ds.normallookup(f)
1391 1391 else:
1392 1392 ds.add(f)
1393 1393 return rejected
1394 1394
1395 1395 def forget(self, files, prefix=""):
1396 1396 with self._repo.wlock():
1397 1397 ds = self._repo.dirstate
1398 1398 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1399 1399 rejected = []
1400 1400 for f in files:
1401 1401 if f not in ds:
1402 1402 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1403 1403 rejected.append(f)
1404 1404 elif ds[f] != 'a':
1405 1405 ds.remove(f)
1406 1406 else:
1407 1407 ds.drop(f)
1408 1408 return rejected
1409 1409
1410 1410 def copy(self, source, dest):
1411 1411 try:
1412 1412 st = self._repo.wvfs.lstat(dest)
1413 1413 except OSError as err:
1414 1414 if err.errno != errno.ENOENT:
1415 1415 raise
1416 1416 self._repo.ui.warn(_("%s does not exist!\n")
1417 1417 % self._repo.dirstate.pathto(dest))
1418 1418 return
1419 1419 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1420 1420 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1421 1421 "symbolic link\n")
1422 1422 % self._repo.dirstate.pathto(dest))
1423 1423 else:
1424 1424 with self._repo.wlock():
1425 1425 ds = self._repo.dirstate
1426 1426 if ds[dest] in '?':
1427 1427 ds.add(dest)
1428 1428 elif ds[dest] in 'r':
1429 1429 ds.normallookup(dest)
1430 1430 ds.copy(source, dest)
1431 1431
1432 1432 def match(self, pats=None, include=None, exclude=None, default='glob',
1433 1433 listsubrepos=False, badfn=None):
1434 1434 r = self._repo
1435 1435
1436 1436 # Only a case insensitive filesystem needs magic to translate user input
1437 1437 # to actual case in the filesystem.
1438 1438 icasefs = not util.fscasesensitive(r.root)
1439 1439 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1440 1440 default, auditor=r.auditor, ctx=self,
1441 1441 listsubrepos=listsubrepos, badfn=badfn,
1442 1442 icasefs=icasefs)
1443 1443
1444 1444 def _filtersuspectsymlink(self, files):
1445 1445 if not files or self._repo.dirstate._checklink:
1446 1446 return files
1447 1447
1448 1448 # Symlink placeholders may get non-symlink-like contents
1449 1449 # via user error or dereferencing by NFS or Samba servers,
1450 1450 # so we filter out any placeholders that don't look like a
1451 1451 # symlink
1452 1452 sane = []
1453 1453 for f in files:
1454 1454 if self.flags(f) == 'l':
1455 1455 d = self[f].data()
1456 1456 if (d == '' or len(d) >= 1024 or '\n' in d
1457 1457 or stringutil.binary(d)):
1458 1458 self._repo.ui.debug('ignoring suspect symlink placeholder'
1459 1459 ' "%s"\n' % f)
1460 1460 continue
1461 1461 sane.append(f)
1462 1462 return sane
1463 1463
1464 1464 def _checklookup(self, files):
1465 1465 # check for any possibly clean files
1466 1466 if not files:
1467 1467 return [], [], []
1468 1468
1469 1469 modified = []
1470 1470 deleted = []
1471 1471 fixup = []
1472 1472 pctx = self._parents[0]
1473 1473 # do a full compare of any files that might have changed
1474 1474 for f in sorted(files):
1475 1475 try:
1476 1476 # This will return True for a file that got replaced by a
1477 1477 # directory in the interim, but fixing that is pretty hard.
1478 1478 if (f not in pctx or self.flags(f) != pctx.flags(f)
1479 1479 or pctx[f].cmp(self[f])):
1480 1480 modified.append(f)
1481 1481 else:
1482 1482 fixup.append(f)
1483 1483 except (IOError, OSError):
1484 1484 # A file became inaccessible in between? Mark it as deleted,
1485 1485 # matching dirstate behavior (issue5584).
1486 1486 # The dirstate has more complex behavior around whether a
1487 1487 # missing file matches a directory, etc, but we don't need to
1488 1488 # bother with that: if f has made it to this point, we're sure
1489 1489 # it's in the dirstate.
1490 1490 deleted.append(f)
1491 1491
1492 1492 return modified, deleted, fixup
1493 1493
1494 1494 def _poststatusfixup(self, status, fixup):
1495 1495 """update dirstate for files that are actually clean"""
1496 1496 poststatus = self._repo.postdsstatus()
1497 1497 if fixup or poststatus:
1498 1498 try:
1499 1499 oldid = self._repo.dirstate.identity()
1500 1500
1501 1501 # updating the dirstate is optional
1502 1502 # so we don't wait on the lock
1503 1503 # wlock can invalidate the dirstate, so cache normal _after_
1504 1504 # taking the lock
1505 1505 with self._repo.wlock(False):
1506 1506 if self._repo.dirstate.identity() == oldid:
1507 1507 if fixup:
1508 1508 normal = self._repo.dirstate.normal
1509 1509 for f in fixup:
1510 1510 normal(f)
1511 1511 # write changes out explicitly, because nesting
1512 1512 # wlock at runtime may prevent 'wlock.release()'
1513 1513 # after this block from doing so for subsequent
1514 1514 # changing files
1515 1515 tr = self._repo.currenttransaction()
1516 1516 self._repo.dirstate.write(tr)
1517 1517
1518 1518 if poststatus:
1519 1519 for ps in poststatus:
1520 1520 ps(self, status)
1521 1521 else:
1522 1522 # in this case, writing changes out breaks
1523 1523 # consistency, because .hg/dirstate was
1524 1524 # already changed simultaneously after last
1525 1525 # caching (see also issue5584 for detail)
1526 1526 self._repo.ui.debug('skip updating dirstate: '
1527 1527 'identity mismatch\n')
1528 1528 except error.LockError:
1529 1529 pass
1530 1530 finally:
1531 1531 # Even if the wlock couldn't be grabbed, clear out the list.
1532 1532 self._repo.clearpostdsstatus()
1533 1533
1534 1534 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1535 1535 '''Gets the status from the dirstate -- internal use only.'''
1536 1536 subrepos = []
1537 1537 if '.hgsub' in self:
1538 1538 subrepos = sorted(self.substate)
1539 1539 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1540 1540 clean=clean, unknown=unknown)
1541 1541
1542 1542 # check for any possibly clean files
1543 1543 fixup = []
1544 1544 if cmp:
1545 1545 modified2, deleted2, fixup = self._checklookup(cmp)
1546 1546 s.modified.extend(modified2)
1547 1547 s.deleted.extend(deleted2)
1548 1548
1549 1549 if fixup and clean:
1550 1550 s.clean.extend(fixup)
1551 1551
1552 1552 self._poststatusfixup(s, fixup)
1553 1553
1554 1554 if match.always():
1555 1555 # cache for performance
1556 1556 if s.unknown or s.ignored or s.clean:
1557 1557 # "_status" is cached with list*=False in the normal route
1558 1558 self._status = scmutil.status(s.modified, s.added, s.removed,
1559 1559 s.deleted, [], [], [])
1560 1560 else:
1561 1561 self._status = s
1562 1562
1563 1563 return s
1564 1564
1565 1565 @propertycache
1566 1566 def _copies(self):
1567 1567 p1copies = {}
1568 1568 p2copies = {}
1569 1569 parents = self._repo.dirstate.parents()
1570 1570 p1manifest = self._repo[parents[0]].manifest()
1571 1571 p2manifest = self._repo[parents[1]].manifest()
1572 1572 narrowmatch = self._repo.narrowmatch()
1573 1573 for dst, src in self._repo.dirstate.copies().items():
1574 1574 if not narrowmatch(dst):
1575 1575 continue
1576 1576 if src in p1manifest:
1577 1577 p1copies[dst] = src
1578 1578 elif src in p2manifest:
1579 1579 p2copies[dst] = src
1580 1580 return p1copies, p2copies
1581 1581 def p1copies(self):
1582 1582 return self._copies[0]
1583 1583 def p2copies(self):
1584 1584 return self._copies[1]
1585 1585
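# [Editor's sketch -- not part of context.py] The _copies property above turns
# dirstate copy records into per-parent copy dicts.  A hypothetical caller
# records a copy and reads it back; 'repo' and the file names are assumptions.
with repo.wlock():
    repo.wvfs.write(b'b.txt', repo.wvfs.read(b'a.txt'))
    repo.dirstate.add(b'b.txt')
    repo.dirstate.copy(b'a.txt', b'b.txt')
print(repo[None].p1copies())  # {b'b.txt': b'a.txt'} when a.txt is in p1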
1586 1586 @propertycache
1587 1587 def _manifest(self):
1588 1588 """generate a manifest corresponding to the values in self._status
1589 1589
1590 1590 This reuses the file nodeid from the parent, but uses special node
1591 1591 identifiers for added and modified files. This is used by manifest
1592 1592 merge to see that files are different and by update logic to avoid
1593 1593 deleting newly added files.
1594 1594 """
1595 1595 return self._buildstatusmanifest(self._status)
1596 1596
1597 1597 def _buildstatusmanifest(self, status):
1598 1598 """Builds a manifest that includes the given status results."""
1599 1599 parents = self.parents()
1600 1600
1601 1601 man = parents[0].manifest().copy()
1602 1602
1603 1603 ff = self._flagfunc
1604 1604 for i, l in ((addednodeid, status.added),
1605 1605 (modifiednodeid, status.modified)):
1606 1606 for f in l:
1607 1607 man[f] = i
1608 1608 try:
1609 1609 man.setflag(f, ff(f))
1610 1610 except OSError:
1611 1611 pass
1612 1612
1613 1613 for f in status.deleted + status.removed:
1614 1614 if f in man:
1615 1615 del man[f]
1616 1616
1617 1617 return man
1618 1618
1619 1619 def _buildstatus(self, other, s, match, listignored, listclean,
1620 1620 listunknown):
1621 1621 """build a status with respect to another context
1622 1622
1623 1623 This includes logic for maintaining the fast path of status when
1624 1624 comparing the working directory against its parent, which is to skip
1625 1625 building a new manifest if self (working directory) is not comparing
1626 1626 against its parent (repo['.']).
1627 1627 """
1628 1628 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1629 1629 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1630 1630 # might have accidentally ended up with the entire contents of the file
1631 1631 # they are supposed to be linking to.
1632 1632 s.modified[:] = self._filtersuspectsymlink(s.modified)
1633 1633 if other != self._repo['.']:
1634 1634 s = super(workingctx, self)._buildstatus(other, s, match,
1635 1635 listignored, listclean,
1636 1636 listunknown)
1637 1637 return s
1638 1638
1639 1639 def _matchstatus(self, other, match):
1640 1640 """override the match method with a filter for directory patterns
1641 1641
1642 1642 We use inheritance to customize the match.bad method only in cases of
1643 1643 workingctx since it belongs only to the working directory when
1644 1644 comparing against the parent changeset.
1645 1645
1646 1646 If we aren't comparing against the working directory's parent, then we
1647 1647 just use the default match object sent to us.
1648 1648 """
1649 1649 if other != self._repo['.']:
1650 1650 def bad(f, msg):
1651 1651 # 'f' may be a directory pattern from 'match.files()',
1652 1652 # so 'f not in ctx1' is not enough
1653 1653 if f not in other and not other.hasdir(f):
1654 1654 self._repo.ui.warn('%s: %s\n' %
1655 1655 (self._repo.dirstate.pathto(f), msg))
1656 1656 match.bad = bad
1657 1657 return match
1658 1658
1659 1659 def markcommitted(self, node):
1660 1660 super(workingctx, self).markcommitted(node)
1661 1661
1662 1662 sparse.aftercommit(self._repo, node)
1663 1663
1664 1664 class committablefilectx(basefilectx):
1665 1665 """A committablefilectx provides common functionality for a file context
1666 1666 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1667 1667 def __init__(self, repo, path, filelog=None, ctx=None):
1668 1668 self._repo = repo
1669 1669 self._path = path
1670 1670 self._changeid = None
1671 1671 self._filerev = self._filenode = None
1672 1672
1673 1673 if filelog is not None:
1674 1674 self._filelog = filelog
1675 1675 if ctx:
1676 1676 self._changectx = ctx
1677 1677
1678 1678 def __nonzero__(self):
1679 1679 return True
1680 1680
1681 1681 __bool__ = __nonzero__
1682 1682
1683 1683 def linkrev(self):
1684 1684 # linked to self._changectx no matter if file is modified or not
1685 1685 return self.rev()
1686 1686
1687 1687 def renamed(self):
1688 1688 path = self.copysource()
1689 1689 if not path:
1690 1690 return None
1691 1691 return path, self._changectx._parents[0]._manifest.get(path, nullid)
1692 1692
1693 1693 def parents(self):
1694 1694 '''return parent filectxs, following copies if necessary'''
1695 1695 def filenode(ctx, path):
1696 1696 return ctx._manifest.get(path, nullid)
1697 1697
1698 1698 path = self._path
1699 1699 fl = self._filelog
1700 1700 pcl = self._changectx._parents
1701 1701 renamed = self.renamed()
1702 1702
1703 1703 if renamed:
1704 1704 pl = [renamed + (None,)]
1705 1705 else:
1706 1706 pl = [(path, filenode(pcl[0], path), fl)]
1707 1707
1708 1708 for pc in pcl[1:]:
1709 1709 pl.append((path, filenode(pc, path), fl))
1710 1710
1711 1711 return [self._parentfilectx(p, fileid=n, filelog=l)
1712 1712 for p, n, l in pl if n != nullid]
1713 1713
1714 1714 def children(self):
1715 1715 return []
1716 1716
1717 1717 class workingfilectx(committablefilectx):
1718 1718 """A workingfilectx object makes access to data related to a particular
1719 1719 file in the working directory convenient."""
1720 1720 def __init__(self, repo, path, filelog=None, workingctx=None):
1721 1721 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1722 1722
1723 1723 @propertycache
1724 1724 def _changectx(self):
1725 1725 return workingctx(self._repo)
1726 1726
1727 1727 def data(self):
1728 1728 return self._repo.wread(self._path)
1729 1729 def copysource(self):
1730 1730 return self._repo.dirstate.copied(self._path)
1731 1731
1732 1732 def size(self):
1733 1733 return self._repo.wvfs.lstat(self._path).st_size
1734 1734 def date(self):
1735 1735 t, tz = self._changectx.date()
1736 1736 try:
1737 1737 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1738 1738 except OSError as err:
1739 1739 if err.errno != errno.ENOENT:
1740 1740 raise
1741 1741 return (t, tz)
1742 1742
1743 1743 def exists(self):
1744 1744 return self._repo.wvfs.exists(self._path)
1745 1745
1746 1746 def lexists(self):
1747 1747 return self._repo.wvfs.lexists(self._path)
1748 1748
1749 1749 def audit(self):
1750 1750 return self._repo.wvfs.audit(self._path)
1751 1751
1752 1752 def cmp(self, fctx):
1753 1753 """compare with other file context
1754 1754
1755 1755 returns True if different from fctx.
1756 1756 """
1757 1757 # fctx should be a filectx (not a workingfilectx)
1758 1758 # invert comparison to reuse the same code path
1759 1759 return fctx.cmp(self)
1760 1760
1761 1761 def remove(self, ignoremissing=False):
1762 1762 """wraps unlink for a repo's working directory"""
1763 1763 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1764 1764 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1765 1765 rmdir=rmdir)
1766 1766
1767 1767 def write(self, data, flags, backgroundclose=False, **kwargs):
1768 1768 """wraps repo.wwrite"""
1769 1769 self._repo.wwrite(self._path, data, flags,
1770 1770 backgroundclose=backgroundclose,
1771 1771 **kwargs)
1772 1772
1773 1773 def markcopied(self, src):
1774 1774 """marks this file a copy of `src`"""
1775 1775 if self._repo.dirstate[self._path] in "nma":
1776 1776 self._repo.dirstate.copy(src, self._path)
1777 1777
1778 1778 def clearunknown(self):
1779 1779 """Removes conflicting items in the working directory so that
1780 1780 ``write()`` can be called successfully.
1781 1781 """
1782 1782 wvfs = self._repo.wvfs
1783 1783 f = self._path
1784 1784 wvfs.audit(f)
1785 1785 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1786 1786 # remove files under the directory as they should already be
1787 1787 # warned and backed up
1788 1788 if wvfs.isdir(f) and not wvfs.islink(f):
1789 1789 wvfs.rmtree(f, forcibly=True)
1790 1790 for p in reversed(list(util.finddirs(f))):
1791 1791 if wvfs.isfileorlink(p):
1792 1792 wvfs.unlink(p)
1793 1793 break
1794 1794 else:
1795 1795 # don't remove files if path conflicts are not processed
1796 1796 if wvfs.isdir(f) and not wvfs.islink(f):
1797 1797 wvfs.removedirs(f)
1798 1798
1799 1799 def setflags(self, l, x):
1800 1800 self._repo.wvfs.setflags(self._path, l, x)
1801 1801
1802 1802 class overlayworkingctx(committablectx):
1803 1803 """Wraps another mutable context with a write-back cache that can be
1804 1804 converted into a commit context.
1805 1805
1806 1806 self._cache[path] maps to a dict with keys: {
1807 1807 'exists': bool?
1808 1808 'date': date?
1809 1809 'data': str?
1810 1810 'flags': str?
1811 1811 'copied': str? (path or None)
1812 1812 }
1813 1813 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1814 1814 is `False`, the file was deleted.
1815 1815 """
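# [Editor's sketch -- not part of context.py] Hypothetical shape of a single
# cache entry after write() has marked a path dirty; the values are made up.
example_cache_entry = {
    'exists': True,             # False once remove() marks the path deleted
    'data': b'new contents\n',  # None when only the flags were changed
    'date': (1558000000, 0),    # dateutil.makedate()-style (unixtime, offset)
    'flags': 'x',               # '', 'l' and/or 'x'
    'copied': None,             # source path recorded by markcopied(), or None
}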
1816 1816
1817 1817 def __init__(self, repo):
1818 1818 super(overlayworkingctx, self).__init__(repo)
1819 1819 self.clean()
1820 1820
1821 1821 def setbase(self, wrappedctx):
1822 1822 self._wrappedctx = wrappedctx
1823 1823 self._parents = [wrappedctx]
1824 1824 # Drop old manifest cache as it is now out of date.
1825 1825 # This is necessary when, e.g., rebasing several nodes with one
1826 1826 # ``overlayworkingctx`` (e.g. with --collapse).
1827 1827 util.clearcachedproperty(self, '_manifest')
1828 1828
1829 1829 def data(self, path):
1830 1830 if self.isdirty(path):
1831 1831 if self._cache[path]['exists']:
1832 1832 if self._cache[path]['data'] is not None:
1833 1833 return self._cache[path]['data']
1834 1834 else:
1835 1835 # Must fall back here, too, because we only set flags.
1836 1836 return self._wrappedctx[path].data()
1837 1837 else:
1838 1838 raise error.ProgrammingError("No such file or directory: %s" %
1839 1839 path)
1840 1840 else:
1841 1841 return self._wrappedctx[path].data()
1842 1842
1843 1843 @propertycache
1844 1844 def _manifest(self):
1845 1845 parents = self.parents()
1846 1846 man = parents[0].manifest().copy()
1847 1847
1848 1848 flag = self._flagfunc
1849 1849 for path in self.added():
1850 1850 man[path] = addednodeid
1851 1851 man.setflag(path, flag(path))
1852 1852 for path in self.modified():
1853 1853 man[path] = modifiednodeid
1854 1854 man.setflag(path, flag(path))
1855 1855 for path in self.removed():
1856 1856 del man[path]
1857 1857 return man
1858 1858
1859 1859 @propertycache
1860 1860 def _flagfunc(self):
1861 1861 def f(path):
1862 1862 return self._cache[path]['flags']
1863 1863 return f
1864 1864
1865 1865 def files(self):
1866 1866 return sorted(self.added() + self.modified() + self.removed())
1867 1867
1868 1868 def modified(self):
1869 1869 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1870 1870 self._existsinparent(f)]
1871 1871
1872 1872 def added(self):
1873 1873 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1874 1874 not self._existsinparent(f)]
1875 1875
1876 1876 def removed(self):
1877 1877 return [f for f in self._cache.keys() if
1878 1878 not self._cache[f]['exists'] and self._existsinparent(f)]
1879 1879
1880 1880 def p1copies(self):
1881 1881 copies = self._wrappedctx.p1copies().copy()
1882 1882 narrowmatch = self._repo.narrowmatch()
1883 1883 for f in self._cache.keys():
1884 1884 if not narrowmatch(f):
1885 1885 continue
1886 1886 copies.pop(f, None) # delete if it exists
1887 1887 source = self._cache[f]['copied']
1888 1888 if source:
1889 1889 copies[f] = source
1890 1890 return copies
1891 1891
1892 1892 def p2copies(self):
1893 1893 copies = self._wrappedctx.p2copies().copy()
1894 1894 narrowmatch = self._repo.narrowmatch()
1895 1895 for f in self._cache.keys():
1896 1896 if not narrowmatch(f):
1897 1897 continue
1898 1898 copies.pop(f, None) # delete if it exists
1899 1899 source = self._cache[f]['copied']
1900 1900 if source:
1901 1901 copies[f] = source
1902 1902 return copies
1903 1903
1904 1904 def isinmemory(self):
1905 1905 return True
1906 1906
1907 1907 def filedate(self, path):
1908 1908 if self.isdirty(path):
1909 1909 return self._cache[path]['date']
1910 1910 else:
1911 1911 return self._wrappedctx[path].date()
1912 1912
1913 1913 def markcopied(self, path, origin):
1914 1914 self._markdirty(path, exists=True, date=self.filedate(path),
1915 1915 flags=self.flags(path), copied=origin)
1916 1916
1917 1917 def copydata(self, path):
1918 1918 if self.isdirty(path):
1919 1919 return self._cache[path]['copied']
1920 1920 else:
1921 1921 return None
1922 1922
1923 1923 def flags(self, path):
1924 1924 if self.isdirty(path):
1925 1925 if self._cache[path]['exists']:
1926 1926 return self._cache[path]['flags']
1927 1927 else:
1928 1928 raise error.ProgrammingError("No such file or directory: %s" %
1929 1929 path)
1930 1930 else:
1931 1931 return self._wrappedctx[path].flags()
1932 1932
1933 1933 def __contains__(self, key):
1934 1934 if key in self._cache:
1935 1935 return self._cache[key]['exists']
1936 1936 return key in self.p1()
1937 1937
1938 1938 def _existsinparent(self, path):
1939 1939 try:
1940 1940 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1941 1941 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1942 1942 # with an ``exists()`` function.
1943 1943 self._wrappedctx[path]
1944 1944 return True
1945 1945 except error.ManifestLookupError:
1946 1946 return False
1947 1947
1948 1948 def _auditconflicts(self, path):
1949 1949 """Replicates conflict checks done by wvfs.write().
1950 1950
1951 1951 Since we never write to the filesystem and never call `applyupdates` in
1952 1952 IMM, we'll never check that a path is actually writable -- e.g., because
1953 1953 it adds `a/foo`, but `a` is actually a file in the other commit.
1954 1954 """
1955 1955 def fail(path, component):
1956 1956 # p1() is the base and we're receiving "writes" for p2()'s
1957 1957 # files.
1958 1958 if 'l' in self.p1()[component].flags():
1959 1959 raise error.Abort("error: %s conflicts with symlink %s "
1960 1960 "in %d." % (path, component,
1961 1961 self.p1().rev()))
1962 1962 else:
1963 1963 raise error.Abort("error: '%s' conflicts with file '%s' in "
1964 1964 "%d." % (path, component,
1965 1965 self.p1().rev()))
1966 1966
1967 1967 # Test that each new directory to be created to write this path from p2
1968 1968 # is not a file in p1.
1969 1969 components = path.split('/')
1970 1970 for i in pycompat.xrange(len(components)):
1971 1971 component = "/".join(components[0:i])
1972 1972 if component in self:
1973 1973 fail(path, component)
1974 1974
1975 1975 # Test the other direction -- that this path from p2 isn't a directory
1976 1976 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1977 1977 match = self.match([path], default=b'path')
1978 1978 matches = self.p1().manifest().matches(match)
1979 1979 mfiles = matches.keys()
1980 1980 if len(mfiles) > 0:
1981 1981 if len(mfiles) == 1 and mfiles[0] == path:
1982 1982 return
1983 1983 # omit the files which are deleted in current IMM wctx
1984 1984 mfiles = [m for m in mfiles if m in self]
1985 1985 if not mfiles:
1986 1986 return
1987 1987 raise error.Abort("error: file '%s' cannot be written because "
1988 1988 " '%s/' is a directory in %s (containing %d "
1989 1989 "entries: %s)"
1990 1990 % (path, path, self.p1(), len(mfiles),
1991 1991 ', '.join(mfiles)))
1992 1992
1993 1993 def write(self, path, data, flags='', **kwargs):
1994 1994 if data is None:
1995 1995 raise error.ProgrammingError("data must be non-None")
1996 1996 self._auditconflicts(path)
1997 1997 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1998 1998 flags=flags)
1999 1999
2000 2000 def setflags(self, path, l, x):
2001 2001 flag = ''
2002 2002 if l:
2003 2003 flag = 'l'
2004 2004 elif x:
2005 2005 flag = 'x'
2006 2006 self._markdirty(path, exists=True, date=dateutil.makedate(),
2007 2007 flags=flag)
2008 2008
2009 2009 def remove(self, path):
2010 2010 self._markdirty(path, exists=False)
2011 2011
2012 2012 def exists(self, path):
2013 2013 """exists behaves like `lexists`, but needs to follow symlinks and
2014 2014 return False if they are broken.
2015 2015 """
2016 2016 if self.isdirty(path):
2017 2017 # If this path exists and is a symlink, "follow" it by calling
2018 2018 # exists on the destination path.
2019 2019 if (self._cache[path]['exists'] and
2020 2020 'l' in self._cache[path]['flags']):
2021 2021 return self.exists(self._cache[path]['data'].strip())
2022 2022 else:
2023 2023 return self._cache[path]['exists']
2024 2024
2025 2025 return self._existsinparent(path)
2026 2026
2027 2027 def lexists(self, path):
2028 2028 """lexists returns True if the path exists"""
2029 2029 if self.isdirty(path):
2030 2030 return self._cache[path]['exists']
2031 2031
2032 2032 return self._existsinparent(path)
2033 2033
2034 2034 def size(self, path):
2035 2035 if self.isdirty(path):
2036 2036 if self._cache[path]['exists']:
2037 2037 return len(self._cache[path]['data'])
2038 2038 else:
2039 2039 raise error.ProgrammingError("No such file or directory: %s" %
2040 2040 path)
2041 2041 return self._wrappedctx[path].size()
2042 2042
2043 2043 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2044 2044 user=None, editor=None):
2045 2045 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2046 2046 committed.
2047 2047
2048 2048 ``text`` is the commit message.
2049 2049 ``parents`` (optional) are rev numbers.
2050 2050 """
2051 2051 # Default parents to the wrapped contexts' if not passed.
2052 2052 if parents is None:
2053 2053 parents = self._wrappedctx.parents()
2054 2054 if len(parents) == 1:
2055 2055 parents = (parents[0], None)
2056 2056
2057 2057 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2058 2058 if parents[1] is None:
2059 2059 parents = (self._repo[parents[0]], None)
2060 2060 else:
2061 2061 parents = (self._repo[parents[0]], self._repo[parents[1]])
2062 2062
2063 2063 files = self.files()
2064 2064 def getfile(repo, memctx, path):
2065 2065 if self._cache[path]['exists']:
2066 2066 return memfilectx(repo, memctx, path,
2067 2067 self._cache[path]['data'],
2068 2068 'l' in self._cache[path]['flags'],
2069 2069 'x' in self._cache[path]['flags'],
2070 2070 self._cache[path]['copied'])
2071 2071 else:
2072 2072 # Returning None, but including the path in `files`, is
2073 2073 # necessary for memctx to register a deletion.
2074 2074 return None
2075 2075 return memctx(self._repo, parents, text, files, getfile, date=date,
2076 2076 extra=extra, user=user, branch=branch, editor=editor)
2077 2077
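# [Editor's sketch -- not part of context.py] Rough shape of the in-memory
# flow built from the methods above; 'repo', the paths and the commit message
# are assumptions, and 'script.sh' is taken to be tracked in the base commit.
wctx = overlayworkingctx(repo)
wctx.setbase(repo[b'.'])
wctx.write(b'a.txt', b'rewritten in memory\n')
wctx.setflags(b'script.sh', False, True)  # mark executable
mctx = wctx.tomemctx(b'in-memory edit', user=b'editor <editor@example.com>')
newnode = repo.commitctx(mctx)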
2078 2078 def isdirty(self, path):
2079 2079 return path in self._cache
2080 2080
2081 2081 def isempty(self):
2082 2082 # We need to discard any keys that are actually clean before the empty
2083 2083 # commit check.
2084 2084 self._compact()
2085 2085 return len(self._cache) == 0
2086 2086
2087 2087 def clean(self):
2088 2088 self._cache = {}
2089 2089
2090 2090 def _compact(self):
2091 2091 """Removes keys from the cache that are actually clean, by comparing
2092 2092 them with the underlying context.
2093 2093
2094 2094 This can occur during the merge process, e.g. by passing --tool :local
2095 2095 to resolve a conflict.
2096 2096 """
2097 2097 keys = []
2098 2098 # This won't be perfect, but can help performance significantly when
2099 2099 # using things like remotefilelog.
2100 2100 scmutil.prefetchfiles(
2101 2101 self.repo(), [self.p1().rev()],
2102 2102 scmutil.matchfiles(self.repo(), self._cache.keys()))
2103 2103
2104 2104 for path in self._cache.keys():
2105 2105 cache = self._cache[path]
2106 2106 try:
2107 2107 underlying = self._wrappedctx[path]
2108 2108 if (underlying.data() == cache['data'] and
2109 2109 underlying.flags() == cache['flags']):
2110 2110 keys.append(path)
2111 2111 except error.ManifestLookupError:
2112 2112 # Path not in the underlying manifest (created).
2113 2113 continue
2114 2114
2115 2115 for path in keys:
2116 2116 del self._cache[path]
2117 2117 return keys
2118 2118
2119 2119 def _markdirty(self, path, exists, data=None, date=None, flags='',
2120 2120 copied=None):
2121 2121 # data not provided, let's see if we already have some; if not, let's
2122 2122 # grab it from our underlying context, so that we always have data if
2123 2123 # the file is marked as existing.
2124 2124 if exists and data is None:
2125 2125 oldentry = self._cache.get(path) or {}
2126 2126 data = oldentry.get('data') or self._wrappedctx[path].data()
2127 2127
2128 2128 self._cache[path] = {
2129 2129 'exists': exists,
2130 2130 'data': data,
2131 2131 'date': date,
2132 2132 'flags': flags,
2133 2133 'copied': copied,
2134 2134 }
2135 2135
2136 2136 def filectx(self, path, filelog=None):
2137 2137 return overlayworkingfilectx(self._repo, path, parent=self,
2138 2138 filelog=filelog)
2139 2139
2140 2140 class overlayworkingfilectx(committablefilectx):
2141 2141 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2142 2142 cache, which can be flushed through later by calling ``flush()``."""
2143 2143
2144 2144 def __init__(self, repo, path, filelog=None, parent=None):
2145 2145 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2146 2146 parent)
2147 2147 self._repo = repo
2148 2148 self._parent = parent
2149 2149 self._path = path
2150 2150
2151 2151 def cmp(self, fctx):
2152 2152 return self.data() != fctx.data()
2153 2153
2154 2154 def changectx(self):
2155 2155 return self._parent
2156 2156
2157 2157 def data(self):
2158 2158 return self._parent.data(self._path)
2159 2159
2160 2160 def date(self):
2161 2161 return self._parent.filedate(self._path)
2162 2162
2163 2163 def exists(self):
2164 2164 return self.lexists()
2165 2165
2166 2166 def lexists(self):
2167 2167 return self._parent.exists(self._path)
2168 2168
2169 2169 def copysource(self):
2170 2170 return self._parent.copydata(self._path)
2171 2171
2172 2172 def size(self):
2173 2173 return self._parent.size(self._path)
2174 2174
2175 2175 def markcopied(self, origin):
2176 2176 self._parent.markcopied(self._path, origin)
2177 2177
2178 2178 def audit(self):
2179 2179 pass
2180 2180
2181 2181 def flags(self):
2182 2182 return self._parent.flags(self._path)
2183 2183
2184 2184 def setflags(self, islink, isexec):
2185 2185 return self._parent.setflags(self._path, islink, isexec)
2186 2186
2187 2187 def write(self, data, flags, backgroundclose=False, **kwargs):
2188 2188 return self._parent.write(self._path, data, flags, **kwargs)
2189 2189
2190 2190 def remove(self, ignoremissing=False):
2191 2191 return self._parent.remove(self._path)
2192 2192
2193 2193 def clearunknown(self):
2194 2194 pass
2195 2195
2196 2196 class workingcommitctx(workingctx):
2197 2197 """A workingcommitctx object makes access to data related to
2198 2198 the revision being committed convenient.
2199 2199
2200 2200 This hides changes in the working directory, if they aren't
2201 2201 committed in this context.
2202 2202 """
2203 2203 def __init__(self, repo, changes,
2204 2204 text="", user=None, date=None, extra=None):
2205 2205 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2206 2206 changes)
2207 2207
2208 2208 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2209 2209 """Return matched files only in ``self._status``
2210 2210
2211 2211 Uncommitted files appear "clean" via this context, even if
2212 2212 they aren't actually so in the working directory.
2213 2213 """
2214 2214 if clean:
2215 2215 clean = [f for f in self._manifest if f not in self._changedset]
2216 2216 else:
2217 2217 clean = []
2218 2218 return scmutil.status([f for f in self._status.modified if match(f)],
2219 2219 [f for f in self._status.added if match(f)],
2220 2220 [f for f in self._status.removed if match(f)],
2221 2221 [], [], [], clean)
2222 2222
2223 2223 @propertycache
2224 2224 def _changedset(self):
2225 2225 """Return the set of files changed in this context
2226 2226 """
2227 2227 changed = set(self._status.modified)
2228 2228 changed.update(self._status.added)
2229 2229 changed.update(self._status.removed)
2230 2230 return changed
2231 2231
2232 2232 def makecachingfilectxfn(func):
2233 2233 """Create a filectxfn that caches based on the path.
2234 2234
2235 2235 We can't use util.cachefunc because it uses all arguments as the cache
2236 2236 key and this creates a cycle since the arguments include the repo and
2237 2237 memctx.
2238 2238 """
2239 2239 cache = {}
2240 2240
2241 2241 def getfilectx(repo, memctx, path):
2242 2242 if path not in cache:
2243 2243 cache[path] = func(repo, memctx, path)
2244 2244 return cache[path]
2245 2245
2246 2246 return getfilectx
2247 2247
2248 2248 def memfilefromctx(ctx):
2249 2249 """Given a context return a memfilectx for ctx[path]
2250 2250
2251 2251 This is a convenience method for building a memctx based on another
2252 2252 context.
2253 2253 """
2254 2254 def getfilectx(repo, memctx, path):
2255 2255 fctx = ctx[path]
2256 2256 copysource = fctx.copysource()
2257 2257 return memfilectx(repo, memctx, path, fctx.data(),
2258 2258 islink=fctx.islink(), isexec=fctx.isexec(),
2259 2259 copysource=copysource)
2260 2260
2261 2261 return getfilectx
2262 2262
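# [Editor's sketch -- not part of context.py] Re-committing the files of an
# existing revision by reusing its file contexts as the callback; 'repo' and
# the commit message are assumptions (memctx is defined further below).
origin = repo[b'tip']
mctx = memctx(repo, (origin.p1().node(), None), b'recreated commit',
              origin.files(), memfilefromctx(origin))
newnode = mctx.commit()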
2263 2263 def memfilefrompatch(patchstore):
2264 2264 """Given a patch (e.g. patchstore object) return a memfilectx
2265 2265
2266 2266 This is a convenience method for building a memctx based on a patchstore.
2267 2267 """
2268 2268 def getfilectx(repo, memctx, path):
2269 2269 data, mode, copysource = patchstore.getfile(path)
2270 2270 if data is None:
2271 2271 return None
2272 2272 islink, isexec = mode
2273 2273 return memfilectx(repo, memctx, path, data, islink=islink,
2274 2274 isexec=isexec, copysource=copysource)
2275 2275
2276 2276 return getfilectx
2277 2277
2278 2278 class memctx(committablectx):
2279 2279 """Use memctx to perform in-memory commits via localrepo.commitctx().
2280 2280
2281 2281 Revision information is supplied at initialization time, while the
2282 2282 related file data is made available through a callback
2283 2283 mechanism. 'repo' is the current localrepo, 'parents' is a
2284 2284 sequence of two parent revisions identifiers (pass None for every
2285 2285 missing parent), 'text' is the commit message and 'files' lists
2286 2286 names of files touched by the revision (normalized and relative to
2287 2287 repository root).
2288 2288
2289 2289 filectxfn(repo, memctx, path) is a callable receiving the
2290 2290 repository, the current memctx object and the normalized path of
2291 2291 requested file, relative to repository root. It is fired by the
2292 2292 commit function for every file in 'files', but calls order is
2293 2293 undefined. If the file is available in the revision being
2294 2294 committed (updated or added), filectxfn returns a memfilectx
2295 2295 object. If the file was removed, filectxfn returns None for recent
2296 2296 Mercurial. Moved files are represented by marking the source file
2297 2297 removed and the new file added with copy information (see
2298 2298 memfilectx).
2299 2299
2300 2300 user receives the committer name and defaults to current
2301 2301 repository username, date is the commit date in any format
2302 2302 supported by dateutil.parsedate() and defaults to current date, extra
2303 2303 is a dictionary of metadata or is left empty.
2304 2304 """
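# [Editor's sketch -- not part of context.py] A minimal example of the
# callback protocol described above; 'repo', the path and the contents are
# assumptions.
def _filectxfn(repo, memctx, path):
    # Returning None here instead would record the file as removed.
    return memfilectx(repo, memctx, path, b'in-memory contents\n')
mctx = memctx(repo, (repo[b'.'].node(), None), b'example in-memory commit',
              [b'example.txt'], _filectxfn,
              user=b'editor <editor@example.com>')
newnode = mctx.commit()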
2305 2305
2306 2306 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2307 2307 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2308 2308 # this field to determine what to do in filectxfn.
2309 2309 _returnnoneformissingfiles = True
2310 2310
2311 2311 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2312 2312 date=None, extra=None, branch=None, editor=False):
2313 2313 super(memctx, self).__init__(repo, text, user, date, extra)
2314 2314 self._rev = None
2315 2315 self._node = None
2316 2316 parents = [(p or nullid) for p in parents]
2317 2317 p1, p2 = parents
2318 2318 self._parents = [self._repo[p] for p in (p1, p2)]
2319 2319 files = sorted(set(files))
2320 2320 self._files = files
2321 2321 if branch is not None:
2322 2322 self._extra['branch'] = encoding.fromlocal(branch)
2323 2323 self.substate = {}
2324 2324
2325 2325 if isinstance(filectxfn, patch.filestore):
2326 2326 filectxfn = memfilefrompatch(filectxfn)
2327 2327 elif not callable(filectxfn):
2328 2328 # if store is not callable, wrap it in a function
2329 2329 filectxfn = memfilefromctx(filectxfn)
2330 2330
2331 2331 # memoizing increases performance for e.g. vcs convert scenarios.
2332 2332 self._filectxfn = makecachingfilectxfn(filectxfn)
2333 2333
2334 2334 if editor:
2335 2335 self._text = editor(self._repo, self, [])
2336 2336 self._repo.savecommitmessage(self._text)
2337 2337
2338 2338 def filectx(self, path, filelog=None):
2339 2339 """get a file context from the working directory
2340 2340
2341 2341 Returns None if file doesn't exist and should be removed."""
2342 2342 return self._filectxfn(self._repo, self, path)
2343 2343
2344 2344 def commit(self):
2345 2345 """commit context to the repo"""
2346 2346 return self._repo.commitctx(self)
2347 2347
2348 2348 @propertycache
2349 2349 def _manifest(self):
2350 2350 """generate a manifest based on the return values of filectxfn"""
2351 2351
2352 2352 # keep this simple for now; just worry about p1
2353 2353 pctx = self._parents[0]
2354 2354 man = pctx.manifest().copy()
2355 2355
2356 2356 for f in self._status.modified:
2357 2357 man[f] = modifiednodeid
2358 2358
2359 2359 for f in self._status.added:
2360 2360 man[f] = addednodeid
2361 2361
2362 2362 for f in self._status.removed:
2363 2363 if f in man:
2364 2364 del man[f]
2365 2365
2366 2366 return man
2367 2367
2368 2368 @propertycache
2369 2369 def _status(self):
2370 2370 """Calculate exact status from ``files`` specified at construction
2371 2371 """
2372 2372 man1 = self.p1().manifest()
2373 2373 p2 = self._parents[1]
2374 2374 # "1 < len(self._parents)" can't be used for checking
2375 2375 # existence of the 2nd parent, because "memctx._parents" is
2376 2376 # explicitly initialized as a list whose length is 2.
2377 2377 if p2.node() != nullid:
2378 2378 man2 = p2.manifest()
2379 2379 managing = lambda f: f in man1 or f in man2
2380 2380 else:
2381 2381 managing = lambda f: f in man1
2382 2382
2383 2383 modified, added, removed = [], [], []
2384 2384 for f in self._files:
2385 2385 if not managing(f):
2386 2386 added.append(f)
2387 2387 elif self[f]:
2388 2388 modified.append(f)
2389 2389 else:
2390 2390 removed.append(f)
2391 2391
2392 2392 return scmutil.status(modified, added, removed, [], [], [], [])
2393 2393
2394 2394 class memfilectx(committablefilectx):
2395 2395 """memfilectx represents an in-memory file to commit.
2396 2396
2397 2397 See memctx and committablefilectx for more details.
2398 2398 """
2399 2399 def __init__(self, repo, changectx, path, data, islink=False,
2400 2400 isexec=False, copysource=None):
2401 2401 """
2402 2402 path is the normalized file path relative to repository root.
2403 2403 data is the file content as a string.
2404 2404 islink is True if the file is a symbolic link.
2405 2405 isexec is True if the file is executable.
2406 2406 copysource is the source file path if the current file was copied in the
2407 2407 revision being committed, or None."""
2408 2408 super(memfilectx, self).__init__(repo, path, None, changectx)
2409 2409 self._data = data
2410 2410 if islink:
2411 2411 self._flags = 'l'
2412 2412 elif isexec:
2413 2413 self._flags = 'x'
2414 2414 else:
2415 2415 self._flags = ''
2416 2416 self._copysource = copysource
2417 2417
2418 2418 def copysource(self):
2419 2419 return self._copysource
2420 2420
2421 2421 def cmp(self, fctx):
2422 2422 return self.data() != fctx.data()
2423 2423
2424 2424 def data(self):
2425 2425 return self._data
2426 2426
2427 2427 def remove(self, ignoremissing=False):
2428 2428 """wraps unlink for a repo's working directory"""
2429 2429 # need to figure out what to do here
2430 2430 del self._changectx[self._path]
2431 2431
2432 2432 def write(self, data, flags, **kwargs):
2433 2433 """wraps repo.wwrite"""
2434 2434 self._data = data
2435 2435
2436 2436
2437 2437 class metadataonlyctx(committablectx):
2438 2438 """Like memctx but it's reusing the manifest of different commit.
2439 2439 Intended to be used by lightweight operations that are creating
2440 2440 metadata-only changes.
2441 2441
2442 2442 Revision information is supplied at initialization time. 'repo' is the
2443 2443 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2444 2444 'parents' is a sequence of two parent revision identifiers (pass None for
2445 2445 every missing parent), 'text' is the commit message.
2446 2446
2447 2447 user receives the committer name and defaults to current repository
2448 2448 username, date is the commit date in any format supported by
2449 2449 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2450 2450 metadata or is left empty.
2451 2451 """
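# [Editor's sketch -- not part of context.py] Rewriting only the description
# of the working directory parent while reusing its manifest unchanged;
# 'repo' and the new message are assumptions.
old = repo[b'.']
newnode = metadataonlyctx(repo, old, text=b'reworded commit message').commit()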
2452 2452 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2453 2453 date=None, extra=None, editor=False):
2454 2454 if text is None:
2455 2455 text = originalctx.description()
2456 2456 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2457 2457 self._rev = None
2458 2458 self._node = None
2459 2459 self._originalctx = originalctx
2460 2460 self._manifestnode = originalctx.manifestnode()
2461 2461 if parents is None:
2462 2462 parents = originalctx.parents()
2463 2463 else:
2464 2464 parents = [repo[p] for p in parents if p is not None]
2465 2465 parents = parents[:]
2466 2466 while len(parents) < 2:
2467 2467 parents.append(repo[nullid])
2468 2468 p1, p2 = self._parents = parents
2469 2469
2470 2470 # sanity check to ensure that the reused manifest parents are
2471 2471 # manifests of our commit parents
2472 2472 mp1, mp2 = self.manifestctx().parents
2473 2473 if p1 != nullid and p1.manifestnode() != mp1:
2474 2474 raise RuntimeError(r"can't reuse the manifest: its p1 "
2475 2475 r"doesn't match the new ctx p1")
2476 2476 if p2 != nullid and p2.manifestnode() != mp2:
2477 2477 raise RuntimeError(r"can't reuse the manifest: "
2478 2478 r"its p2 doesn't match the new ctx p2")
2479 2479
2480 2480 self._files = originalctx.files()
2481 2481 self.substate = {}
2482 2482
2483 2483 if editor:
2484 2484 self._text = editor(self._repo, self, [])
2485 2485 self._repo.savecommitmessage(self._text)
2486 2486
2487 2487 def manifestnode(self):
2488 2488 return self._manifestnode
2489 2489
2490 2490 @property
2491 2491 def _manifestctx(self):
2492 2492 return self._repo.manifestlog[self._manifestnode]
2493 2493
2494 2494 def filectx(self, path, filelog=None):
2495 2495 return self._originalctx.filectx(path, filelog=filelog)
2496 2496
2497 2497 def commit(self):
2498 2498 """commit context to the repo"""
2499 2499 return self._repo.commitctx(self)
2500 2500
2501 2501 @property
2502 2502 def _manifest(self):
2503 2503 return self._originalctx.manifest()
2504 2504
2505 2505 @propertycache
2506 2506 def _status(self):
2507 2507 """Calculate exact status from ``files`` specified in the ``origctx``
2508 2508 and the parents' manifests.
2509 2509 """
2510 2510 man1 = self.p1().manifest()
2511 2511 p2 = self._parents[1]
2512 2512 # "1 < len(self._parents)" can't be used for checking
2513 2513 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2514 2514 # explicitly initialized as a list whose length is 2.
2515 2515 if p2.node() != nullid:
2516 2516 man2 = p2.manifest()
2517 2517 managing = lambda f: f in man1 or f in man2
2518 2518 else:
2519 2519 managing = lambda f: f in man1
2520 2520
2521 2521 modified, added, removed = [], [], []
2522 2522 for f in self._files:
2523 2523 if not managing(f):
2524 2524 added.append(f)
2525 2525 elif f in self:
2526 2526 modified.append(f)
2527 2527 else:
2528 2528 removed.append(f)
2529 2529
2530 2530 return scmutil.status(modified, added, removed, [], [], [], [])
2531 2531
2532 2532 class arbitraryfilectx(object):
2533 2533 """Allows you to use filectx-like functions on a file in an arbitrary
2534 2534 location on disk, possibly not in the working directory.
2535 2535 """
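# [Editor's sketch -- not part of context.py] Comparing a file at an arbitrary
# on-disk location with a tracked file; the paths are assumptions.
backup = arbitraryfilectx(b'/tmp/a.txt.orig', repo=repo)
if backup.cmp(repo[b'.'][b'a.txt']):
    repo.ui.status(b'backup differs from the committed copy\n')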
2536 2536 def __init__(self, path, repo=None):
2537 2537 # Repo is optional because contrib/simplemerge uses this class.
2538 2538 self._repo = repo
2539 2539 self._path = path
2540 2540
2541 2541 def cmp(self, fctx):
2542 2542 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2543 2543 # path if either side is a symlink.
2544 2544 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2545 2545 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2546 2546 # Add a fast-path for merge if both sides are disk-backed.
2547 2547 # Note that filecmp uses the opposite return values (True if same)
2548 2548 # from our cmp functions (True if different).
2549 2549 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2550 2550 return self.data() != fctx.data()
2551 2551
2552 2552 def path(self):
2553 2553 return self._path
2554 2554
2555 2555 def flags(self):
2556 2556 return ''
2557 2557
2558 2558 def data(self):
2559 2559 return util.readfile(self._path)
2560 2560
2561 2561 def decodeddata(self):
2562 2562 with open(self._path, "rb") as f:
2563 2563 return f.read()
2564 2564
2565 2565 def remove(self):
2566 2566 util.unlink(self._path)
2567 2567
2568 2568 def write(self, data, flags, **kwargs):
2569 2569 assert not flags
2570 2570 with open(self._path, "wb") as f:
2571 2571 f.write(data)