##// END OF EJS Templates
context: clarify the various modes in the filesremoved method...
marmoute -
r43292:15badd62 default
parent child Browse files
Show More
@@ -1,2587 +1,2592 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 copies,
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short hex of the changeset node, e.g. for display.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal only when they are the same concrete type and
        # point at the same revision; AttributeError covers revless contexts.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership test: is ``key`` a file tracked in this context?
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] returns a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context yields the file names in its manifest.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            # Files already reported deleted take precedence over any other
            # classification the manifest diff would produce.
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed subrepo state for this context (lazily computed and cached).
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for ``subpath`` in the subrepo state tuple.
        return self.substate[subpath][1]

    # -- simple accessors over cached changeset data ------------------------
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # Human-readable phase name ('public', 'draft', 'secret', ...).
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything beyond the public phase may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when there is only one parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for ``path``, preferring whichever
        manifest representation is already cached on this context.

        Raises ManifestLookupError when the path is not in the manifest."""
        if r'_manifest' in self.__dict__:
            # Full manifest already materialized: cheapest lookup.
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            # A delta against p1 may be enough if the file was touched here.
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        # Fall back to a targeted find() on the stored manifest.
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files simply have no flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    @propertycache
    def _copies(self):
        # Default implementation reads copy metadata from the filelogs;
        # changectx overrides this with changeset-stored data when configured.
        return copies.computechangesetcopies(self)
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # Build a matcher rooted at the repo and anchored to this context.
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, pathfn=None, copy=None,
             copysourcematch=None, hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, pathfn=pathfn,
                          copy=copy, copysourcematch=copysourcematch,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # The null revision is the only falsy changectx.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (lazily cached).
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against p1 -- cheaper than a full manifest read.
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def filesmodified(self):
        # Modified = touched files that were neither added nor removed here.
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        """Return the files added by this changeset.

        The 'experimental.copies.read-from' config selects the data source:
        - 'changeset-only': trust only the data stored in the changeset,
          defaulting to the empty list when none was recorded;
        - 'compatibility': use changeset-stored data when present, otherwise
          fall back to recomputing from the filelogs;
        - any other value (filelog mode): always recompute from the filelogs.
        """
        source = self._repo.ui.config('experimental', 'copies.read-from')
        filesadded = self._changeset.filesadded
        if source == 'changeset-only':
            if filesadded is None:
                filesadded = []
        elif source == 'compatibility':
            if filesadded is None:
                filesadded = scmutil.computechangesetfilesadded(self)
        else:
            filesadded = scmutil.computechangesetfilesadded(self)
        return filesadded

    def filesremoved(self):
        """Return the files removed by this changeset.

        Mirrors filesadded(): the 'experimental.copies.read-from' config
        selects between changeset-only, compatibility (changeset data with a
        filelog fallback), and filelog recomputation modes.
        """
        source = self._repo.ui.config('experimental', 'copies.read-from')
        filesremoved = self._changeset.filesremoved
        if source == 'changeset-only':
            if filesremoved is None:
                filesremoved = []
        elif source == 'compatibility':
            if filesremoved is None:
                filesremoved = scmutil.computechangesetfilesremoved(self)
        else:
            filesremoved = scmutil.computechangesetfilesremoved(self)
        return filesremoved

    @propertycache
    def _copies(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if (source == 'changeset-only' or
            (source == 'compatibility' and p1copies is not None)):
            return p1copies or {}, p2copies or {}

        # Otherwise (config said to read only from filelog, or we are in
        # compatiblity mode and there is not data in the changeset), we get
        # the copy metadata from the filelogs.
        return super(changectx, self)._copies
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
604 609
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        # Filelog (per-file revlog) holding the history of self._path.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Changelog revision associated with this file revision, chosen by
        # the best information available on this instance.
        if r'_changectx' in self.__dict__:
            # An explicit changectx was provided: trust it.
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # Last resort: the (possibly aliased) stored linkrev.
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # A filectx is truthy when its file revision can be resolved.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal when same concrete type, same path, and same file node.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # Raw stored linkrev; may alias when several changesets reuse the
        # same file revision -- see introrev() for the corrected value.
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def copysource(self):
        return self._copied and self._copied[0]
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            # Unreadable data is treated as non-binary rather than an error.
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
    # Subclasses with their own comparison semantics set this to True so that
    # cmp() below defers to their implementation.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
780 785
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # The stored linkrev already is srcrev: nothing to adjust.
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # Walked below the requested floor without finding the
                    # introduction: give up as documented.
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
834 839
835 840 def isintroducedafter(self, changelogrev):
836 841 """True if a filectx has been introduced after a given floor revision
837 842 """
838 843 if self.linkrev() >= changelogrev:
839 844 return True
840 845 introrev = self._introrev(stoprev=changelogrev)
841 846 if introrev is None:
842 847 return False
843 848 return introrev >= changelogrev
844 849
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # No anchoring changeset known: the raw linkrev is the best guess.
            return self.linkrev()
884 889
885 890 def introfilectx(self):
886 891 """Return filectx having identical contents, but pointing to the
887 892 changeset revision where this filectx was introduced"""
888 893 introrev = self.introrev()
889 894 if self.rev() == introrev:
890 895 return self
891 896 return self.filectx(self.filenode(), changeid=introrev)
892 897
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
908 913
    def parents(self):
        """return parent filectxs, substituting rename sources when present"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # drop null parents; each entry is (path, filenode, filelog)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
928 933
929 934 def p1(self):
930 935 return self.parents()[0]
931 936
932 937 def p2(self):
933 938 p = self.parents()
934 939 if len(p) == 2:
935 940 return p[1]
936 941 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
937 942
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # precompute the ancestor set once so every linkrev adjustment
            # during annotation can reuse it
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
983 988
    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs, highest (linkrev, filenode) key first.

        With ``followfirst``, only first parents are followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending ancestor with the largest (linkrev, filenode)
            c = visit.pop(max(visit))
            yield c
999 1004
1000 1005 def decodeddata(self):
1001 1006 """Returns `data()` after running repository decoding filters.
1002 1007
1003 1008 This is often equivalent to how the data would be expressed on disk.
1004 1009 """
1005 1010 return self._repo.wwritedata(self.path(), self.data())
1006 1011
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the repository.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw revlog data, without applying stored flags/filters
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is replaced by empty data when policy allows
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        # file revision was reused by a later changeset: only report the
        # copy when neither parent already has this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1112 1117
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        # only the supplied values pre-fill the propertycache slots above;
        # unset ones are computed lazily
        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        if not self._extra.get('branch'):
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # a committable context renders as its first parent plus a "+" marker
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # lazily computed unless `changes` was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    # aliases exposing the status-derived lists under the files* names used
    # elsewhere in the context API
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # intentionally a no-op here; subclasses override as needed

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1243 1248
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        branch = None
        if not extra or 'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
        super(workingctx, self).__init__(repo, text, user, date, extra, changes,
                                         branch=branch)

    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # a file is "in" the working context unless unknown ('?') or
        # removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        # prefer the already-built manifest when available
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition, returning rejected names."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            # the size limit is loop-invariant: read the config once instead
            # of once per added file
            limit = ui.configbytes('ui', 'large-file-limit')
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files, returning rejected names."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    # scheduled-for-add files are simply dropped
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        """Record that ``dest`` is a copy of ``source`` in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        # split dirstate copy records by which parent provides the source
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def markcommitted(self, node):
        # flip modified/added files to normal and drop removed ones, then
        # move the dirstate parent to the freshly committed node
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1677 1682
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no stored revision yet: these stay unset until commit
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file is a copy,
        else None."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # the copy source replaces the first parent; its filelog is
            # unknown here (None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop null parents
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1730 1735
1731 1736 class workingfilectx(committablefilectx):
1732 1737 """A workingfilectx object makes access to data related to a particular
1733 1738 file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        # the working context plays the role of committablefilectx's `ctx`
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1736 1741
    @propertycache
    def _changectx(self):
        # lazily build (and cache) a workingctx when none was supplied
        return workingctx(self._repo)
1740 1745
1741 1746 def data(self):
1742 1747 return self._repo.wread(self._path)
1743 1748 def copysource(self):
1744 1749 return self._repo.dirstate.copied(self._path)
1745 1750
1746 1751 def size(self):
1747 1752 return self._repo.wvfs.lstat(self._path).st_size
1748 1753 def lstat(self):
1749 1754 return self._repo.wvfs.lstat(self._path)
    def date(self):
        """Return (mtime of the working copy file, changectx timezone),
        falling back to the changectx date when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            # only a missing file is expected; anything else is a real error
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)
1758 1763
1759 1764 def exists(self):
1760 1765 return self._repo.wvfs.exists(self._path)
1761 1766
1762 1767 def lexists(self):
1763 1768 return self._repo.wvfs.lexists(self._path)
1764 1769
1765 1770 def audit(self):
1766 1771 return self._repo.wvfs.audit(self._path)
1767 1772
1768 1773 def cmp(self, fctx):
1769 1774 """compare with other file context
1770 1775
1771 1776 returns True if different than fctx.
1772 1777 """
1773 1778 # fctx should be a filectx (not a workingfilectx)
1774 1779 # invert comparison to reuse the same code path
1775 1780 return fctx.cmp(self)
1776 1781
1777 1782 def remove(self, ignoremissing=False):
1778 1783 """wraps unlink for a repo's working directory"""
1779 1784 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1780 1785 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1781 1786 rmdir=rmdir)
1782 1787
1783 1788 def write(self, data, flags, backgroundclose=False, **kwargs):
1784 1789 """wraps repo.wwrite"""
1785 1790 return self._repo.wwrite(self._path, data, flags,
1786 1791 backgroundclose=backgroundclose,
1787 1792 **kwargs)
1788 1793
1789 1794 def markcopied(self, src):
1790 1795 """marks this file a copy of `src`"""
1791 1796 self._repo.dirstate.copy(src, self._path)
1792 1797
1793 1798 def clearunknown(self):
1794 1799 """Removes conflicting items in the working directory so that
1795 1800 ``write()`` can be called successfully.
1796 1801 """
1797 1802 wvfs = self._repo.wvfs
1798 1803 f = self._path
1799 1804 wvfs.audit(f)
1800 1805 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1801 1806 # remove files under the directory as they should already be
1802 1807 # warned and backed up
1803 1808 if wvfs.isdir(f) and not wvfs.islink(f):
1804 1809 wvfs.rmtree(f, forcibly=True)
1805 1810 for p in reversed(list(util.finddirs(f))):
1806 1811 if wvfs.isfileorlink(p):
1807 1812 wvfs.unlink(p)
1808 1813 break
1809 1814 else:
1810 1815 # don't remove files if path conflicts are not processed
1811 1816 if wvfs.isdir(f) and not wvfs.islink(f):
1812 1817 wvfs.removedirs(f)
1813 1818
1814 1819 def setflags(self, l, x):
1815 1820 self._repo.wvfs.setflags(self._path, l, x)
1816 1821
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """set (or reset) the context this overlay writes on top of"""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """file content, preferring the cached copy over the wrapped ctx

        Raises ProgrammingError if the file was deleted in the overlay.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """the wrapped context's manifest patched with the cached changes"""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty in the overlay and already present in the wrapped ctx
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # dirty in the overlay and absent from the wrapped ctx
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # deleted in the overlay but present in the wrapped ctx
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def p1copies(self):
        # Start from the wrapped context's copies: the overlay only records
        # copies made on top of it. (The previous ``self._repo._wrappedctx``
        # lookup was wrong -- the repo object has no ``_wrappedctx``
        # attribute; it is set on this ctx by ``setbase()``.)
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # see p1copies() for why we read from the wrapped context
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # use ``path``, not ``self._path``: a changectx-like object
                # has no ``_path`` attribute (only filectx classes do), so
                # the old message formatting itself raised AttributeError
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        # symlink flag wins over the executable flag
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # use ``path``, not ``self._path``: a changectx-like object
                # has no ``_path`` attribute (only filectx classes do), so
                # the old message formatting itself raised AttributeError
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        """True when ``path`` has a cached write or deletion"""
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        """forget all cached modifications"""
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2156 2161
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # the owning overlayworkingctx; every operation below delegates to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when content differs from ``fctx``
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # overlayworkingctx.exists() already follows symlinks, so this is
        # the same as lexists() here
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # no-op: there is no on-disk path to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is meaningless for in-memory writes; dropped
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no-op: nothing on disk can conflict with an in-memory write
        pass
2212 2217
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything tracked that is not part of this commit is "clean"
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # deleted/unknown/ignored are deliberately empty: this context only
        # exposes the files being committed
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2248 2253
def makecachingfilectxfn(func):
    """Wrap ``func`` so its result is memoized per path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2264 2269
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(),
                          isexec=fctx.isexec(),
                          copysource=fctx.copysource())

    return getfilectx
2279 2284
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch removed this file; memctx records it as a deletion
            return None
        return memfilectx(repo, memctx, path, data,
                          islink=mode[0], isexec=mode[1],
                          copysource=copysource)

    return getfilectx
2294 2299
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        # substitute nullid for missing parents and resolve to changectxs
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        # normalize: deduplicate and sort the touched-files list
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by neither parent: newly added
                added.append(f)
            elif self[f]:
                # filectxfn returned a filectx: treat as modified
                modified.append(f)
            else:
                # filectxfn returned None: removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2409 2414
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # the symlink flag takes precedence over the executable flag
        self._flags = 'l' if islink else 'x' if isexec else ''
        self._copysource = copysource

    def copysource(self):
        # path this file was copied from, or None
        return self._copysource

    def cmp(self, fctx):
        # True when contents differ from ``fctx``
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2451 2456
2452 2457
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # work on a copy and pad with null revisions up to two parents
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the manifest borrowed from the original changeset
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # delegate file access to the context whose manifest we reuse
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by neither parent: newly added
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2547 2552
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when contents differ from ``fctx``."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the fast path when neither side is a symlink.
        fastpath = ('l' not in self.flags() and 'l' not in fctx.flags()
                    and isinstance(fctx, workingfilectx) and self._repo)
        if fastpath:
            # Both sides are disk-backed. Note that filecmp uses the
            # opposite return values (True if same) from our cmp functions
            # (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flag information
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # raw bytes, bypassing any filters
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # flags are unsupported for arbitrary files
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now