rawdata: update callers in context...
marmoute
r43014:1928f7bb default draft
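This changeset switches filectx.rawdata() in mercurial/context.py from self._filelog.revision(self._filenode, raw=True) to the dedicated self._filelog.rawdata(self._filenode) accessor; the rest of the file is unchanged. Below is a hedged usage sketch of the affected API, not part of the changeset: the repository path and the tracked file name README are assumptions, and it only runs if the current directory is a Mercurial repository containing that file.

    # Hypothetical sketch: read the raw revision bytes of a file through a
    # filectx, which after this changeset delegates to filelog.rawdata().
    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')  # assumes cwd is an hg repo
    fctx = repo[b'tip'][b'README']               # filectx for README at tip
    raw = fctx.rawdata()                         # backed by filelog.rawdata()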
@@ -1,2579 +1,2579 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 copies,
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
52 52 class basectx(object):
53 53 """A basectx object represents the common logic for its children:
54 54 changectx: read-only context that is already present in the repo,
55 55 workingctx: a context that represents the working directory and can
56 56 be committed,
57 57 memctx: a context that represents changes in-memory and can also
58 58 be committed."""
59 59
60 60 def __init__(self, repo):
61 61 self._repo = repo
62 62
63 63 def __bytes__(self):
64 64 return short(self.node())
65 65
66 66 __str__ = encoding.strmethod(__bytes__)
67 67
68 68 def __repr__(self):
69 69 return r"<%s %s>" % (type(self).__name__, str(self))
70 70
71 71 def __eq__(self, other):
72 72 try:
73 73 return type(self) == type(other) and self._rev == other._rev
74 74 except AttributeError:
75 75 return False
76 76
77 77 def __ne__(self, other):
78 78 return not (self == other)
79 79
80 80 def __contains__(self, key):
81 81 return key in self._manifest
82 82
83 83 def __getitem__(self, key):
84 84 return self.filectx(key)
85 85
86 86 def __iter__(self):
87 87 return iter(self._manifest)
88 88
89 89 def _buildstatusmanifest(self, status):
90 90 """Builds a manifest that includes the given status results, if this is
91 91 a working copy context. For non-working copy contexts, it just returns
92 92 the normal manifest."""
93 93 return self.manifest()
94 94
95 95 def _matchstatus(self, other, match):
96 96 """This internal method provides a way for child objects to override the
97 97 match operator.
98 98 """
99 99 return match
100 100
101 101 def _buildstatus(self, other, s, match, listignored, listclean,
102 102 listunknown):
103 103 """build a status with respect to another context"""
104 104 # Load earliest manifest first for caching reasons. More specifically,
105 105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 107 # 1000 and cache it so that when you read 1001, we just need to apply a
108 108 # delta to what's in the cache. So that's one full reconstruction + one
109 109 # delta application.
110 110 mf2 = None
111 111 if self.rev() is not None and self.rev() < other.rev():
112 112 mf2 = self._buildstatusmanifest(s)
113 113 mf1 = other._buildstatusmanifest(s)
114 114 if mf2 is None:
115 115 mf2 = self._buildstatusmanifest(s)
116 116
117 117 modified, added = [], []
118 118 removed = []
119 119 clean = []
120 120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 121 deletedset = set(deleted)
122 122 d = mf1.diff(mf2, match=match, clean=listclean)
123 123 for fn, value in d.iteritems():
124 124 if fn in deletedset:
125 125 continue
126 126 if value is None:
127 127 clean.append(fn)
128 128 continue
129 129 (node1, flag1), (node2, flag2) = value
130 130 if node1 is None:
131 131 added.append(fn)
132 132 elif node2 is None:
133 133 removed.append(fn)
134 134 elif flag1 != flag2:
135 135 modified.append(fn)
136 136 elif node2 not in wdirfilenodeids:
137 137 # When comparing files between two commits, we save time by
138 138 # not comparing the file contents when the nodeids differ.
139 139 # Note that this means we incorrectly report a reverted change
140 140 # to a file as a modification.
141 141 modified.append(fn)
142 142 elif self[fn].cmp(other[fn]):
143 143 modified.append(fn)
144 144 else:
145 145 clean.append(fn)
146 146
147 147 if removed:
148 148 # need to filter files if they are already reported as removed
149 149 unknown = [fn for fn in unknown if fn not in mf1 and
150 150 (not match or match(fn))]
151 151 ignored = [fn for fn in ignored if fn not in mf1 and
152 152 (not match or match(fn))]
153 153 # if they're deleted, don't report them as removed
154 154 removed = [fn for fn in removed if fn not in deletedset]
155 155
156 156 return scmutil.status(modified, added, removed, deleted, unknown,
157 157 ignored, clean)
158 158
159 159 @propertycache
160 160 def substate(self):
161 161 return subrepoutil.state(self, self._repo.ui)
162 162
163 163 def subrev(self, subpath):
164 164 return self.substate[subpath][1]
165 165
166 166 def rev(self):
167 167 return self._rev
168 168 def node(self):
169 169 return self._node
170 170 def hex(self):
171 171 return hex(self.node())
172 172 def manifest(self):
173 173 return self._manifest
174 174 def manifestctx(self):
175 175 return self._manifestctx
176 176 def repo(self):
177 177 return self._repo
178 178 def phasestr(self):
179 179 return phases.phasenames[self.phase()]
180 180 def mutable(self):
181 181 return self.phase() > phases.public
182 182
183 183 def matchfileset(self, expr, badfn=None):
184 184 return fileset.match(self, expr, badfn=badfn)
185 185
186 186 def obsolete(self):
187 187 """True if the changeset is obsolete"""
188 188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189 189
190 190 def extinct(self):
191 191 """True if the changeset is extinct"""
192 192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193 193
194 194 def orphan(self):
195 195 """True if the changeset is not obsolete, but its ancestor is"""
196 196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197 197
198 198 def phasedivergent(self):
199 199 """True if the changeset tries to be a successor of a public changeset
200 200
201 201 Only non-public and non-obsolete changesets may be phase-divergent.
202 202 """
203 203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204 204
205 205 def contentdivergent(self):
206 206 """Is a successor of a changeset with multiple possible successor sets
207 207
208 208 Only non-public and non-obsolete changesets may be content-divergent.
209 209 """
210 210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211 211
212 212 def isunstable(self):
213 213 """True if the changeset is either orphan, phase-divergent or
214 214 content-divergent"""
215 215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216 216
217 217 def instabilities(self):
218 218 """return the list of instabilities affecting this changeset.
219 219
220 220 Instabilities are returned as strings. Possible values are:
221 221 - orphan,
222 222 - phase-divergent,
223 223 - content-divergent.
224 224 """
225 225 instabilities = []
226 226 if self.orphan():
227 227 instabilities.append('orphan')
228 228 if self.phasedivergent():
229 229 instabilities.append('phase-divergent')
230 230 if self.contentdivergent():
231 231 instabilities.append('content-divergent')
232 232 return instabilities
233 233
234 234 def parents(self):
235 235 """return contexts for each parent changeset"""
236 236 return self._parents
237 237
238 238 def p1(self):
239 239 return self._parents[0]
240 240
241 241 def p2(self):
242 242 parents = self._parents
243 243 if len(parents) == 2:
244 244 return parents[1]
245 245 return self._repo[nullrev]
246 246
247 247 def _fileinfo(self, path):
248 248 if r'_manifest' in self.__dict__:
249 249 try:
250 250 return self._manifest[path], self._manifest.flags(path)
251 251 except KeyError:
252 252 raise error.ManifestLookupError(self._node, path,
253 253 _('not found in manifest'))
254 254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 255 if path in self._manifestdelta:
256 256 return (self._manifestdelta[path],
257 257 self._manifestdelta.flags(path))
258 258 mfl = self._repo.manifestlog
259 259 try:
260 260 node, flag = mfl[self._changeset.manifest].find(path)
261 261 except KeyError:
262 262 raise error.ManifestLookupError(self._node, path,
263 263 _('not found in manifest'))
264 264
265 265 return node, flag
266 266
267 267 def filenode(self, path):
268 268 return self._fileinfo(path)[0]
269 269
270 270 def flags(self, path):
271 271 try:
272 272 return self._fileinfo(path)[1]
273 273 except error.LookupError:
274 274 return ''
275 275
276 276 @propertycache
277 277 def _copies(self):
278 278 return copies.computechangesetcopies(self)
279 279 def p1copies(self):
280 280 return self._copies[0]
281 281 def p2copies(self):
282 282 return self._copies[1]
283 283
284 284 def sub(self, path, allowcreate=True):
285 285 '''return a subrepo for the stored revision of path, never wdir()'''
286 286 return subrepo.subrepo(self, path, allowcreate=allowcreate)
287 287
288 288 def nullsub(self, path, pctx):
289 289 return subrepo.nullsubrepo(self, path, pctx)
290 290
291 291 def workingsub(self, path):
292 292 '''return a subrepo for the stored revision, or wdir if this is a wdir
293 293 context.
294 294 '''
295 295 return subrepo.subrepo(self, path, allowwdir=True)
296 296
297 297 def match(self, pats=None, include=None, exclude=None, default='glob',
298 298 listsubrepos=False, badfn=None):
299 299 r = self._repo
300 300 return matchmod.match(r.root, r.getcwd(), pats,
301 301 include, exclude, default,
302 302 auditor=r.nofsauditor, ctx=self,
303 303 listsubrepos=listsubrepos, badfn=badfn)
304 304
305 305 def diff(self, ctx2=None, match=None, changes=None, opts=None,
306 306 losedatafn=None, pathfn=None, copy=None,
307 307 copysourcematch=None, hunksfilterfn=None):
308 308 """Returns a diff generator for the given contexts and matcher"""
309 309 if ctx2 is None:
310 310 ctx2 = self.p1()
311 311 if ctx2 is not None:
312 312 ctx2 = self._repo[ctx2]
313 313 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
314 314 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
315 315 copy=copy, copysourcematch=copysourcematch,
316 316 hunksfilterfn=hunksfilterfn)
317 317
318 318 def dirs(self):
319 319 return self._manifest.dirs()
320 320
321 321 def hasdir(self, dir):
322 322 return self._manifest.hasdir(dir)
323 323
324 324 def status(self, other=None, match=None, listignored=False,
325 325 listclean=False, listunknown=False, listsubrepos=False):
326 326 """return status of files between two nodes or node and working
327 327 directory.
328 328
329 329 If other is None, compare this node with working directory.
330 330
331 331 returns (modified, added, removed, deleted, unknown, ignored, clean)
332 332 """
333 333
334 334 ctx1 = self
335 335 ctx2 = self._repo[other]
336 336
337 337 # This next code block is, admittedly, fragile logic that tests for
338 338 # reversing the contexts and wouldn't need to exist if it weren't for
339 339 # the fast (and common) code path of comparing the working directory
340 340 # with its first parent.
341 341 #
342 342 # What we're aiming for here is the ability to call:
343 343 #
344 344 # workingctx.status(parentctx)
345 345 #
346 346 # If we always built the manifest for each context and compared those,
347 347 # then we'd be done. But the special case of the above call means we
348 348 # just copy the manifest of the parent.
349 349 reversed = False
350 350 if (not isinstance(ctx1, changectx)
351 351 and isinstance(ctx2, changectx)):
352 352 reversed = True
353 353 ctx1, ctx2 = ctx2, ctx1
354 354
355 355 match = self._repo.narrowmatch(match)
356 356 match = ctx2._matchstatus(ctx1, match)
357 357 r = scmutil.status([], [], [], [], [], [], [])
358 358 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
359 359 listunknown)
360 360
361 361 if reversed:
362 362 # Reverse added and removed. Clear deleted, unknown and ignored as
363 363 # these make no sense to reverse.
364 364 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
365 365 r.clean)
366 366
367 367 if listsubrepos:
368 368 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
369 369 try:
370 370 rev2 = ctx2.subrev(subpath)
371 371 except KeyError:
372 372 # A subrepo that existed in node1 was deleted between
373 373 # node1 and node2 (inclusive). Thus, ctx2's substate
374 374 # won't contain that subpath. The best we can do is ignore it.
375 375 rev2 = None
376 376 submatch = matchmod.subdirmatcher(subpath, match)
377 377 s = sub.status(rev2, match=submatch, ignored=listignored,
378 378 clean=listclean, unknown=listunknown,
379 379 listsubrepos=True)
380 380 for rfiles, sfiles in zip(r, s):
381 381 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
382 382
383 383 for l in r:
384 384 l.sort()
385 385
386 386 return r
387 387
388 388 class changectx(basectx):
389 389 """A changecontext object makes access to data related to a particular
390 390 changeset convenient. It represents a read-only context already present in
391 391 the repo."""
392 392 def __init__(self, repo, rev, node):
393 393 super(changectx, self).__init__(repo)
394 394 self._rev = rev
395 395 self._node = node
396 396
397 397 def __hash__(self):
398 398 try:
399 399 return hash(self._rev)
400 400 except AttributeError:
401 401 return id(self)
402 402
403 403 def __nonzero__(self):
404 404 return self._rev != nullrev
405 405
406 406 __bool__ = __nonzero__
407 407
408 408 @propertycache
409 409 def _changeset(self):
410 410 return self._repo.changelog.changelogrevision(self.rev())
411 411
412 412 @propertycache
413 413 def _manifest(self):
414 414 return self._manifestctx.read()
415 415
416 416 @property
417 417 def _manifestctx(self):
418 418 return self._repo.manifestlog[self._changeset.manifest]
419 419
420 420 @propertycache
421 421 def _manifestdelta(self):
422 422 return self._manifestctx.readdelta()
423 423
424 424 @propertycache
425 425 def _parents(self):
426 426 repo = self._repo
427 427 p1, p2 = repo.changelog.parentrevs(self._rev)
428 428 if p2 == nullrev:
429 429 return [repo[p1]]
430 430 return [repo[p1], repo[p2]]
431 431
432 432 def changeset(self):
433 433 c = self._changeset
434 434 return (
435 435 c.manifest,
436 436 c.user,
437 437 c.date,
438 438 c.files,
439 439 c.description,
440 440 c.extra,
441 441 )
442 442 def manifestnode(self):
443 443 return self._changeset.manifest
444 444
445 445 def user(self):
446 446 return self._changeset.user
447 447 def date(self):
448 448 return self._changeset.date
449 449 def files(self):
450 450 return self._changeset.files
451 451 def filesmodified(self):
452 452 modified = set(self.files())
453 453 modified.difference_update(self.filesadded())
454 454 modified.difference_update(self.filesremoved())
455 455 return sorted(modified)
456 456 def filesadded(self):
457 457 source = self._repo.ui.config('experimental', 'copies.read-from')
458 458 if (source == 'changeset-only' or
459 459 (source == 'compatibility' and
460 460 self._changeset.filesadded is not None)):
461 461 return self._changeset.filesadded or []
462 462 return scmutil.computechangesetfilesadded(self)
463 463 def filesremoved(self):
464 464 source = self._repo.ui.config('experimental', 'copies.read-from')
465 465 if (source == 'changeset-only' or
466 466 (source == 'compatibility' and
467 467 self._changeset.filesremoved is not None)):
468 468 return self._changeset.filesremoved or []
469 469 return scmutil.computechangesetfilesremoved(self)
470 470
471 471 @propertycache
472 472 def _copies(self):
473 473 source = self._repo.ui.config('experimental', 'copies.read-from')
474 474 p1copies = self._changeset.p1copies
475 475 p2copies = self._changeset.p2copies
476 476 # If config says to get copy metadata only from changeset, then return
477 477 # that, defaulting to {} if there was no copy metadata.
478 478 # In compatibility mode, we return copy data from the changeset if
479 479 # it was recorded there, and otherwise we fall back to getting it from
480 480 # the filelogs (below).
481 481 if (source == 'changeset-only' or
482 482 (source == 'compatibility' and p1copies is not None)):
483 483 return p1copies or {}, p2copies or {}
484 484
485 485 # Otherwise (config said to read only from filelog, or we are in
486 486 # compatibility mode and there is no data in the changeset), we get
487 487 # the copy metadata from the filelogs.
488 488 return super(changectx, self)._copies
489 489 def description(self):
490 490 return self._changeset.description
491 491 def branch(self):
492 492 return encoding.tolocal(self._changeset.extra.get("branch"))
493 493 def closesbranch(self):
494 494 return 'close' in self._changeset.extra
495 495 def extra(self):
496 496 """Return a dict of extra information."""
497 497 return self._changeset.extra
498 498 def tags(self):
499 499 """Return a list of byte tag names"""
500 500 return self._repo.nodetags(self._node)
501 501 def bookmarks(self):
502 502 """Return a list of byte bookmark names."""
503 503 return self._repo.nodebookmarks(self._node)
504 504 def phase(self):
505 505 return self._repo._phasecache.phase(self._repo, self._rev)
506 506 def hidden(self):
507 507 return self._rev in repoview.filterrevs(self._repo, 'visible')
508 508
509 509 def isinmemory(self):
510 510 return False
511 511
512 512 def children(self):
513 513 """return list of changectx contexts for each child changeset.
514 514
515 515 This returns only the immediate child changesets. Use descendants() to
516 516 recursively walk children.
517 517 """
518 518 c = self._repo.changelog.children(self._node)
519 519 return [self._repo[x] for x in c]
520 520
521 521 def ancestors(self):
522 522 for a in self._repo.changelog.ancestors([self._rev]):
523 523 yield self._repo[a]
524 524
525 525 def descendants(self):
526 526 """Recursively yield all children of the changeset.
527 527
528 528 For just the immediate children, use children()
529 529 """
530 530 for d in self._repo.changelog.descendants([self._rev]):
531 531 yield self._repo[d]
532 532
533 533 def filectx(self, path, fileid=None, filelog=None):
534 534 """get a file context from this changeset"""
535 535 if fileid is None:
536 536 fileid = self.filenode(path)
537 537 return filectx(self._repo, path, fileid=fileid,
538 538 changectx=self, filelog=filelog)
539 539
540 540 def ancestor(self, c2, warn=False):
541 541 """return the "best" ancestor context of self and c2
542 542
543 543 If there are multiple candidates, it will show a message and check
544 544 merge.preferancestor configuration before falling back to the
545 545 revlog ancestor."""
546 546 # deal with workingctxs
547 547 n2 = c2._node
548 548 if n2 is None:
549 549 n2 = c2._parents[0]._node
550 550 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
551 551 if not cahs:
552 552 anc = nullid
553 553 elif len(cahs) == 1:
554 554 anc = cahs[0]
555 555 else:
556 556 # experimental config: merge.preferancestor
557 557 for r in self._repo.ui.configlist('merge', 'preferancestor'):
558 558 try:
559 559 ctx = scmutil.revsymbol(self._repo, r)
560 560 except error.RepoLookupError:
561 561 continue
562 562 anc = ctx.node()
563 563 if anc in cahs:
564 564 break
565 565 else:
566 566 anc = self._repo.changelog.ancestor(self._node, n2)
567 567 if warn:
568 568 self._repo.ui.status(
569 569 (_("note: using %s as ancestor of %s and %s\n") %
570 570 (short(anc), short(self._node), short(n2))) +
571 571 ''.join(_(" alternatively, use --config "
572 572 "merge.preferancestor=%s\n") %
573 573 short(n) for n in sorted(cahs) if n != anc))
574 574 return self._repo[anc]
575 575
576 576 def isancestorof(self, other):
577 577 """True if this changeset is an ancestor of other"""
578 578 return self._repo.changelog.isancestorrev(self._rev, other._rev)
579 579
580 580 def walk(self, match):
581 581 '''Generates matching file names.'''
582 582
583 583 # Wrap the match.bad method so its message includes the nodeid
584 584 def bad(fn, msg):
585 585 # The manifest doesn't know about subrepos, so don't complain about
586 586 # paths into valid subrepos.
587 587 if any(fn == s or fn.startswith(s + '/')
588 588 for s in self.substate):
589 589 return
590 590 match.bad(fn, _('no such file in rev %s') % self)
591 591
592 592 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
593 593 return self._manifest.walk(m)
594 594
595 595 def matches(self, match):
596 596 return self.walk(match)
597 597
598 598 class basefilectx(object):
599 599 """A filecontext object represents the common logic for its children:
600 600 filectx: read-only access to a filerevision that is already present
601 601 in the repo,
602 602 workingfilectx: a filecontext that represents files from the working
603 603 directory,
604 604 memfilectx: a filecontext that represents files in-memory,
605 605 """
606 606 @propertycache
607 607 def _filelog(self):
608 608 return self._repo.file(self._path)
609 609
610 610 @propertycache
611 611 def _changeid(self):
612 612 if r'_changectx' in self.__dict__:
613 613 return self._changectx.rev()
614 614 elif r'_descendantrev' in self.__dict__:
615 615 # this file context was created from a revision with a known
616 616 # descendant, we can (lazily) correct for linkrev aliases
617 617 return self._adjustlinkrev(self._descendantrev)
618 618 else:
619 619 return self._filelog.linkrev(self._filerev)
620 620
621 621 @propertycache
622 622 def _filenode(self):
623 623 if r'_fileid' in self.__dict__:
624 624 return self._filelog.lookup(self._fileid)
625 625 else:
626 626 return self._changectx.filenode(self._path)
627 627
628 628 @propertycache
629 629 def _filerev(self):
630 630 return self._filelog.rev(self._filenode)
631 631
632 632 @propertycache
633 633 def _repopath(self):
634 634 return self._path
635 635
636 636 def __nonzero__(self):
637 637 try:
638 638 self._filenode
639 639 return True
640 640 except error.LookupError:
641 641 # file is missing
642 642 return False
643 643
644 644 __bool__ = __nonzero__
645 645
646 646 def __bytes__(self):
647 647 try:
648 648 return "%s@%s" % (self.path(), self._changectx)
649 649 except error.LookupError:
650 650 return "%s@???" % self.path()
651 651
652 652 __str__ = encoding.strmethod(__bytes__)
653 653
654 654 def __repr__(self):
655 655 return r"<%s %s>" % (type(self).__name__, str(self))
656 656
657 657 def __hash__(self):
658 658 try:
659 659 return hash((self._path, self._filenode))
660 660 except AttributeError:
661 661 return id(self)
662 662
663 663 def __eq__(self, other):
664 664 try:
665 665 return (type(self) == type(other) and self._path == other._path
666 666 and self._filenode == other._filenode)
667 667 except AttributeError:
668 668 return False
669 669
670 670 def __ne__(self, other):
671 671 return not (self == other)
672 672
673 673 def filerev(self):
674 674 return self._filerev
675 675 def filenode(self):
676 676 return self._filenode
677 677 @propertycache
678 678 def _flags(self):
679 679 return self._changectx.flags(self._path)
680 680 def flags(self):
681 681 return self._flags
682 682 def filelog(self):
683 683 return self._filelog
684 684 def rev(self):
685 685 return self._changeid
686 686 def linkrev(self):
687 687 return self._filelog.linkrev(self._filerev)
688 688 def node(self):
689 689 return self._changectx.node()
690 690 def hex(self):
691 691 return self._changectx.hex()
692 692 def user(self):
693 693 return self._changectx.user()
694 694 def date(self):
695 695 return self._changectx.date()
696 696 def files(self):
697 697 return self._changectx.files()
698 698 def description(self):
699 699 return self._changectx.description()
700 700 def branch(self):
701 701 return self._changectx.branch()
702 702 def extra(self):
703 703 return self._changectx.extra()
704 704 def phase(self):
705 705 return self._changectx.phase()
706 706 def phasestr(self):
707 707 return self._changectx.phasestr()
708 708 def obsolete(self):
709 709 return self._changectx.obsolete()
710 710 def instabilities(self):
711 711 return self._changectx.instabilities()
712 712 def manifest(self):
713 713 return self._changectx.manifest()
714 714 def changectx(self):
715 715 return self._changectx
716 716 def renamed(self):
717 717 return self._copied
718 718 def copysource(self):
719 719 return self._copied and self._copied[0]
720 720 def repo(self):
721 721 return self._repo
722 722 def size(self):
723 723 return len(self.data())
724 724
725 725 def path(self):
726 726 return self._path
727 727
728 728 def isbinary(self):
729 729 try:
730 730 return stringutil.binary(self.data())
731 731 except IOError:
732 732 return False
733 733 def isexec(self):
734 734 return 'x' in self.flags()
735 735 def islink(self):
736 736 return 'l' in self.flags()
737 737
738 738 def isabsent(self):
739 739 """whether this filectx represents a file not in self._changectx
740 740
741 741 This is mainly for merge code to detect change/delete conflicts. This is
742 742 expected to be True for all subclasses of basectx."""
743 743 return False
744 744
745 745 _customcmp = False
746 746 def cmp(self, fctx):
747 747 """compare with other file context
748 748
749 749 returns True if different than fctx.
750 750 """
751 751 if fctx._customcmp:
752 752 return fctx.cmp(self)
753 753
754 754 if self._filenode is None:
755 755 raise error.ProgrammingError(
756 756 'filectx.cmp() must be reimplemented if not backed by revlog')
757 757
758 758 if fctx._filenode is None:
759 759 if self._repo._encodefilterpats:
760 760 # can't rely on size() because wdir content may be decoded
761 761 return self._filelog.cmp(self._filenode, fctx.data())
762 762 if self.size() - 4 == fctx.size():
763 763 # size() can match:
764 764 # if file data starts with '\1\n', empty metadata block is
765 765 # prepended, which adds 4 bytes to filelog.size().
766 766 return self._filelog.cmp(self._filenode, fctx.data())
767 767 if self.size() == fctx.size():
768 768 # size() matches: need to compare content
769 769 return self._filelog.cmp(self._filenode, fctx.data())
770 770
771 771 # size() differs
772 772 return True
773 773
774 774 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
775 775 """return the first ancestor of <srcrev> introducing <fnode>
776 776
777 777 If the linkrev of the file revision does not point to an ancestor of
778 778 srcrev, we'll walk down the ancestors until we find one introducing
779 779 this file revision.
780 780
781 781 :srcrev: the changeset revision we search ancestors from
782 782 :inclusive: if true, the src revision will also be checked
783 783 :stoprev: an optional revision to stop the walk at. If no introduction
784 784 of this file content could be found before this floor
785 785 revision, the function will return "None" and stop its
786 786 iteration.
787 787 """
788 788 repo = self._repo
789 789 cl = repo.unfiltered().changelog
790 790 mfl = repo.manifestlog
791 791 # fetch the linkrev
792 792 lkr = self.linkrev()
793 793 if srcrev == lkr:
794 794 return lkr
795 795 # hack to reuse ancestor computation when searching for renames
796 796 memberanc = getattr(self, '_ancestrycontext', None)
797 797 iteranc = None
798 798 if srcrev is None:
799 799 # wctx case, used by workingfilectx during mergecopy
800 800 revs = [p.rev() for p in self._repo[None].parents()]
801 801 inclusive = True # we skipped the real (revless) source
802 802 else:
803 803 revs = [srcrev]
804 804 if memberanc is None:
805 805 memberanc = iteranc = cl.ancestors(revs, lkr,
806 806 inclusive=inclusive)
807 807 # check if this linkrev is an ancestor of srcrev
808 808 if lkr not in memberanc:
809 809 if iteranc is None:
810 810 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
811 811 fnode = self._filenode
812 812 path = self._path
813 813 for a in iteranc:
814 814 if stoprev is not None and a < stoprev:
815 815 return None
816 816 ac = cl.read(a) # get changeset data (we avoid object creation)
817 817 if path in ac[3]: # checking the 'files' field.
818 818 # The file has been touched, check if the content is
819 819 # similar to the one we search for.
820 820 if fnode == mfl[ac[0]].readfast().get(path):
821 821 return a
822 822 # In theory, we should never get out of that loop without a result.
823 823 # But if the manifest uses a buggy file revision (not a child of
824 824 # the one it replaces) we could. Such a buggy situation will
825 825 # likely result in a crash somewhere else at some point.
826 826 return lkr
827 827
828 828 def isintroducedafter(self, changelogrev):
829 829 """True if a filectx has been introduced after a given floor revision
830 830 """
831 831 if self.linkrev() >= changelogrev:
832 832 return True
833 833 introrev = self._introrev(stoprev=changelogrev)
834 834 if introrev is None:
835 835 return False
836 836 return introrev >= changelogrev
837 837
838 838 def introrev(self):
839 839 """return the rev of the changeset which introduced this file revision
840 840
841 841 This method is different from linkrev because it takes into account the
842 842 changeset the filectx was created from. It ensures the returned
843 843 revision is one of its ancestors. This prevents bugs from
844 844 'linkrev-shadowing' when a file revision is used by multiple
845 845 changesets.
846 846 """
847 847 return self._introrev()
848 848
849 849 def _introrev(self, stoprev=None):
850 850 """
851 851 Same as `introrev`, but with an extra argument to limit the changelog
852 852 iteration range in some internal use cases.
853 853
854 854 If `stoprev` is set, the `introrev` will not be searched past that
855 855 `stoprev` revision and "None" might be returned. This is useful to
856 856 limit the iteration range.
857 857 """
858 858 toprev = None
859 859 attrs = vars(self)
860 860 if r'_changeid' in attrs:
861 861 # We have a cached value already
862 862 toprev = self._changeid
863 863 elif r'_changectx' in attrs:
864 864 # We know which changelog entry we are coming from
865 865 toprev = self._changectx.rev()
866 866
867 867 if toprev is not None:
868 868 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
869 869 elif r'_descendantrev' in attrs:
870 870 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
871 871 # be nice and cache the result of the computation
872 872 if introrev is not None:
873 873 self._changeid = introrev
874 874 return introrev
875 875 else:
876 876 return self.linkrev()
877 877
878 878 def introfilectx(self):
879 879 """Return filectx having identical contents, but pointing to the
880 880 changeset revision where this filectx was introduced"""
881 881 introrev = self.introrev()
882 882 if self.rev() == introrev:
883 883 return self
884 884 return self.filectx(self.filenode(), changeid=introrev)
885 885
886 886 def _parentfilectx(self, path, fileid, filelog):
887 887 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
888 888 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
889 889 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
890 890 # If self is associated with a changeset (probably explicitly
891 891 # fed), ensure the created filectx is associated with a
892 892 # changeset that is an ancestor of self.changectx.
893 893 # This lets us later use _adjustlinkrev to get a correct link.
894 894 fctx._descendantrev = self.rev()
895 895 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
896 896 elif r'_descendantrev' in vars(self):
897 897 # Otherwise propagate _descendantrev if we have one associated.
898 898 fctx._descendantrev = self._descendantrev
899 899 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
900 900 return fctx
901 901
902 902 def parents(self):
903 903 _path = self._path
904 904 fl = self._filelog
905 905 parents = self._filelog.parents(self._filenode)
906 906 pl = [(_path, node, fl) for node in parents if node != nullid]
907 907
908 908 r = fl.renamed(self._filenode)
909 909 if r:
910 910 # - In the simple rename case, both parents are nullid, pl is empty.
911 911 # - In case of merge, only one of the parents is nullid and should
912 912 # be replaced with the rename information. This parent is -always-
913 913 # the first one.
914 914 #
915 915 # As nullid has always been filtered out in the previous list
916 916 # comprehension, inserting at 0 will always result in replacing the
917 917 # first nullid parent with the rename information.
918 918 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
919 919
920 920 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
921 921
922 922 def p1(self):
923 923 return self.parents()[0]
924 924
925 925 def p2(self):
926 926 p = self.parents()
927 927 if len(p) == 2:
928 928 return p[1]
929 929 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
930 930
931 931 def annotate(self, follow=False, skiprevs=None, diffopts=None):
932 932 """Returns a list of annotateline objects for each line in the file
933 933
934 934 - line.fctx is the filectx of the node where that line was last changed
935 935 - line.lineno is the line number at the first appearance in the managed
936 936 file
937 937 - line.text is the data on that line (including newline character)
938 938 """
939 939 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
940 940
941 941 def parents(f):
942 942 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
943 943 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
944 944 # from the topmost introrev (= srcrev) down to p.linkrev() if it
945 945 # isn't an ancestor of the srcrev.
946 946 f._changeid
947 947 pl = f.parents()
948 948
949 949 # Don't return renamed parents if we aren't following.
950 950 if not follow:
951 951 pl = [p for p in pl if p.path() == f.path()]
952 952
953 953 # renamed filectx won't have a filelog yet, so set it
954 954 # from the cache to save time
955 955 for p in pl:
956 956 if not r'_filelog' in p.__dict__:
957 957 p._filelog = getlog(p.path())
958 958
959 959 return pl
960 960
961 961 # use linkrev to find the first changeset where self appeared
962 962 base = self.introfilectx()
963 963 if getattr(base, '_ancestrycontext', None) is None:
964 964 cl = self._repo.changelog
965 965 if base.rev() is None:
966 966 # wctx is not inclusive, but works because _ancestrycontext
967 967 # is used to test filelog revisions
968 968 ac = cl.ancestors([p.rev() for p in base.parents()],
969 969 inclusive=True)
970 970 else:
971 971 ac = cl.ancestors([base.rev()], inclusive=True)
972 972 base._ancestrycontext = ac
973 973
974 974 return dagop.annotate(base, parents, skiprevs=skiprevs,
975 975 diffopts=diffopts)
976 976
977 977 def ancestors(self, followfirst=False):
978 978 visit = {}
979 979 c = self
980 980 if followfirst:
981 981 cut = 1
982 982 else:
983 983 cut = None
984 984
985 985 while True:
986 986 for parent in c.parents()[:cut]:
987 987 visit[(parent.linkrev(), parent.filenode())] = parent
988 988 if not visit:
989 989 break
990 990 c = visit.pop(max(visit))
991 991 yield c
992 992
993 993 def decodeddata(self):
994 994 """Returns `data()` after running repository decoding filters.
995 995
996 996 This is often equivalent to how the data would be expressed on disk.
997 997 """
998 998 return self._repo.wwritedata(self.path(), self.data())
999 999
1000 1000 class filectx(basefilectx):
1001 1001 """A filecontext object makes access to data related to a particular
1002 1002 filerevision convenient."""
1003 1003 def __init__(self, repo, path, changeid=None, fileid=None,
1004 1004 filelog=None, changectx=None):
1005 1005 """changeid must be a revision number, if specified.
1006 1006 fileid can be a file revision or node."""
1007 1007 self._repo = repo
1008 1008 self._path = path
1009 1009
1010 1010 assert (changeid is not None
1011 1011 or fileid is not None
1012 1012 or changectx is not None), (
1013 1013 "bad args: changeid=%r, fileid=%r, changectx=%r"
1014 1014 % (changeid, fileid, changectx))
1015 1015
1016 1016 if filelog is not None:
1017 1017 self._filelog = filelog
1018 1018
1019 1019 if changeid is not None:
1020 1020 self._changeid = changeid
1021 1021 if changectx is not None:
1022 1022 self._changectx = changectx
1023 1023 if fileid is not None:
1024 1024 self._fileid = fileid
1025 1025
1026 1026 @propertycache
1027 1027 def _changectx(self):
1028 1028 try:
1029 1029 return self._repo[self._changeid]
1030 1030 except error.FilteredRepoLookupError:
1031 1031 # Linkrev may point to any revision in the repository. When the
1032 1032 # repository is filtered this may lead to `filectx` trying to build
1033 1033 # `changectx` for a filtered revision. In such a case we fall back
1034 1034 # to creating `changectx` on the unfiltered version of the repository.
1035 1035 # This fallback should not be an issue because a `changectx` obtained
1036 1036 # from a `filectx` is not used in complex operations that care about
1037 1037 # filtering.
1038 1038 #
1039 1039 # This fallback is a cheap and dirty fix that prevents several
1040 1040 # crashes. It does not ensure the behavior is correct. However the
1041 1041 # behavior was not correct before filtering either and "incorrect
1042 1042 # behavior" is seen as better than a "crash".
1043 1043 #
1044 1044 # Linkrevs have several serious problems with filtering that are
1045 1045 # complicated to solve. Proper handling of the issue here should be
1046 1046 # considered when fixing the linkrev issues is on the table.
1047 1047 return self._repo.unfiltered()[self._changeid]
1048 1048
1049 1049 def filectx(self, fileid, changeid=None):
1050 1050 '''opens an arbitrary revision of the file without
1051 1051 opening a new filelog'''
1052 1052 return filectx(self._repo, self._path, fileid=fileid,
1053 1053 filelog=self._filelog, changeid=changeid)
1054 1054
1055 1055 def rawdata(self):
1056 return self._filelog.revision(self._filenode, raw=True)
1056 return self._filelog.rawdata(self._filenode)
1057 1057
1058 1058 def rawflags(self):
1059 1059 """low-level revlog flags"""
1060 1060 return self._filelog.flags(self._filerev)
1061 1061
1062 1062 def data(self):
1063 1063 try:
1064 1064 return self._filelog.read(self._filenode)
1065 1065 except error.CensoredNodeError:
1066 1066 if self._repo.ui.config("censor", "policy") == "ignore":
1067 1067 return ""
1068 1068 raise error.Abort(_("censored node: %s") % short(self._filenode),
1069 1069 hint=_("set censor.policy to ignore errors"))
1070 1070
1071 1071 def size(self):
1072 1072 return self._filelog.size(self._filerev)
1073 1073
1074 1074 @propertycache
1075 1075 def _copied(self):
1076 1076 """check if file was actually renamed in this changeset revision
1077 1077
1078 1078 If a rename is logged in the file revision, we report the copy for the
1079 1079 changeset only if the file revision's linkrev points back to the changeset
1080 1080 in question or both changeset parents contain different file revisions.
1081 1081 """
1082 1082
1083 1083 renamed = self._filelog.renamed(self._filenode)
1084 1084 if not renamed:
1085 1085 return None
1086 1086
1087 1087 if self.rev() == self.linkrev():
1088 1088 return renamed
1089 1089
1090 1090 name = self.path()
1091 1091 fnode = self._filenode
1092 1092 for p in self._changectx.parents():
1093 1093 try:
1094 1094 if fnode == p.filenode(name):
1095 1095 return None
1096 1096 except error.LookupError:
1097 1097 pass
1098 1098 return renamed
1099 1099
1100 1100 def children(self):
1101 1101 # hard for renames
1102 1102 c = self._filelog.children(self._filenode)
1103 1103 return [filectx(self._repo, self._path, fileid=x,
1104 1104 filelog=self._filelog) for x in c]
1105 1105
1106 1106 class committablectx(basectx):
1107 1107 """A committablectx object provides common functionality for a context that
1108 1108 wants the ability to commit, e.g. workingctx or memctx."""
1109 1109 def __init__(self, repo, text="", user=None, date=None, extra=None,
1110 1110 changes=None, branch=None):
1111 1111 super(committablectx, self).__init__(repo)
1112 1112 self._rev = None
1113 1113 self._node = None
1114 1114 self._text = text
1115 1115 if date:
1116 1116 self._date = dateutil.parsedate(date)
1117 1117 if user:
1118 1118 self._user = user
1119 1119 if changes:
1120 1120 self._status = changes
1121 1121
1122 1122 self._extra = {}
1123 1123 if extra:
1124 1124 self._extra = extra.copy()
1125 1125 if branch is not None:
1126 1126 self._extra['branch'] = encoding.fromlocal(branch)
1127 1127 if not self._extra.get('branch'):
1128 1128 self._extra['branch'] = 'default'
1129 1129
1130 1130 def __bytes__(self):
1131 1131 return bytes(self._parents[0]) + "+"
1132 1132
1133 1133 __str__ = encoding.strmethod(__bytes__)
1134 1134
1135 1135 def __nonzero__(self):
1136 1136 return True
1137 1137
1138 1138 __bool__ = __nonzero__
1139 1139
1140 1140 @propertycache
1141 1141 def _status(self):
1142 1142 return self._repo.status()
1143 1143
1144 1144 @propertycache
1145 1145 def _user(self):
1146 1146 return self._repo.ui.username()
1147 1147
1148 1148 @propertycache
1149 1149 def _date(self):
1150 1150 ui = self._repo.ui
1151 1151 date = ui.configdate('devel', 'default-date')
1152 1152 if date is None:
1153 1153 date = dateutil.makedate()
1154 1154 return date
1155 1155
1156 1156 def subrev(self, subpath):
1157 1157 return None
1158 1158
1159 1159 def manifestnode(self):
1160 1160 return None
1161 1161 def user(self):
1162 1162 return self._user or self._repo.ui.username()
1163 1163 def date(self):
1164 1164 return self._date
1165 1165 def description(self):
1166 1166 return self._text
1167 1167 def files(self):
1168 1168 return sorted(self._status.modified + self._status.added +
1169 1169 self._status.removed)
1170 1170 def modified(self):
1171 1171 return self._status.modified
1172 1172 def added(self):
1173 1173 return self._status.added
1174 1174 def removed(self):
1175 1175 return self._status.removed
1176 1176 def deleted(self):
1177 1177 return self._status.deleted
1178 1178 filesmodified = modified
1179 1179 filesadded = added
1180 1180 filesremoved = removed
1181 1181
1182 1182 def branch(self):
1183 1183 return encoding.tolocal(self._extra['branch'])
1184 1184 def closesbranch(self):
1185 1185 return 'close' in self._extra
1186 1186 def extra(self):
1187 1187 return self._extra
1188 1188
1189 1189 def isinmemory(self):
1190 1190 return False
1191 1191
1192 1192 def tags(self):
1193 1193 return []
1194 1194
1195 1195 def bookmarks(self):
1196 1196 b = []
1197 1197 for p in self.parents():
1198 1198 b.extend(p.bookmarks())
1199 1199 return b
1200 1200
1201 1201 def phase(self):
1202 1202 phase = phases.draft # default phase to draft
1203 1203 for p in self.parents():
1204 1204 phase = max(phase, p.phase())
1205 1205 return phase
1206 1206
1207 1207 def hidden(self):
1208 1208 return False
1209 1209
1210 1210 def children(self):
1211 1211 return []
1212 1212
1213 1213 def ancestor(self, c2):
1214 1214 """return the "best" ancestor context of self and c2"""
1215 1215 return self._parents[0].ancestor(c2) # punt on two parents for now
1216 1216
1217 1217 def ancestors(self):
1218 1218 for p in self._parents:
1219 1219 yield p
1220 1220 for a in self._repo.changelog.ancestors(
1221 1221 [p.rev() for p in self._parents]):
1222 1222 yield self._repo[a]
1223 1223
1224 1224 def markcommitted(self, node):
1225 1225 """Perform post-commit cleanup necessary after committing this ctx
1226 1226
1227 1227 Specifically, this updates backing stores this working context
1228 1228 wraps to reflect the fact that the changes reflected by this
1229 1229 workingctx have been committed. For example, it marks
1230 1230 modified and added files as normal in the dirstate.
1231 1231
1232 1232 """
1233 1233
1234 1234 def dirty(self, missing=False, merge=True, branch=True):
1235 1235 return False
1236 1236
1237 1237 class workingctx(committablectx):
1238 1238 """A workingctx object makes access to data related to
1239 1239 the current working directory convenient.
1240 1240 date - any valid date string or (unixtime, offset), or None.
1241 1241 user - username string, or None.
1242 1242 extra - a dictionary of extra values, or None.
1243 1243 changes - a list of file lists as returned by localrepo.status()
1244 1244 or None to use the repository status.
1245 1245 """
1246 1246 def __init__(self, repo, text="", user=None, date=None, extra=None,
1247 1247 changes=None):
1248 1248 branch = None
1249 1249 if not extra or 'branch' not in extra:
1250 1250 try:
1251 1251 branch = repo.dirstate.branch()
1252 1252 except UnicodeDecodeError:
1253 1253 raise error.Abort(_('branch name not in UTF-8!'))
1254 1254 super(workingctx, self).__init__(repo, text, user, date, extra, changes,
1255 1255 branch=branch)
1256 1256
1257 1257 def __iter__(self):
1258 1258 d = self._repo.dirstate
1259 1259 for f in d:
1260 1260 if d[f] != 'r':
1261 1261 yield f
1262 1262
1263 1263 def __contains__(self, key):
1264 1264 return self._repo.dirstate[key] not in "?r"
1265 1265
1266 1266 def hex(self):
1267 1267 return wdirhex
1268 1268
1269 1269 @propertycache
1270 1270 def _parents(self):
1271 1271 p = self._repo.dirstate.parents()
1272 1272 if p[1] == nullid:
1273 1273 p = p[:-1]
1274 1274 # use unfiltered repo to delay/avoid loading obsmarkers
1275 1275 unfi = self._repo.unfiltered()
1276 1276 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1277 1277
1278 1278 def _fileinfo(self, path):
1279 1279 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1280 1280 self._manifest
1281 1281 return super(workingctx, self)._fileinfo(path)
1282 1282
1283 1283 def _buildflagfunc(self):
1284 1284 # Create a fallback function for getting file flags when the
1285 1285 # filesystem doesn't support them
1286 1286
1287 1287 copiesget = self._repo.dirstate.copies().get
1288 1288 parents = self.parents()
1289 1289 if len(parents) < 2:
1290 1290 # when we have one parent, it's easy: copy from parent
1291 1291 man = parents[0].manifest()
1292 1292 def func(f):
1293 1293 f = copiesget(f, f)
1294 1294 return man.flags(f)
1295 1295 else:
1296 1296 # merges are tricky: we try to reconstruct the unstored
1297 1297 # result from the merge (issue1802)
1298 1298 p1, p2 = parents
1299 1299 pa = p1.ancestor(p2)
1300 1300 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1301 1301
1302 1302 def func(f):
1303 1303 f = copiesget(f, f) # may be wrong for merges with copies
1304 1304 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1305 1305 if fl1 == fl2:
1306 1306 return fl1
1307 1307 if fl1 == fla:
1308 1308 return fl2
1309 1309 if fl2 == fla:
1310 1310 return fl1
1311 1311 return '' # punt for conflicts
1312 1312
1313 1313 return func
1314 1314
1315 1315 @propertycache
1316 1316 def _flagfunc(self):
1317 1317 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1318 1318
1319 1319 def flags(self, path):
1320 1320 if r'_manifest' in self.__dict__:
1321 1321 try:
1322 1322 return self._manifest.flags(path)
1323 1323 except KeyError:
1324 1324 return ''
1325 1325
1326 1326 try:
1327 1327 return self._flagfunc(path)
1328 1328 except OSError:
1329 1329 return ''
1330 1330
1331 1331 def filectx(self, path, filelog=None):
1332 1332 """get a file context from the working directory"""
1333 1333 return workingfilectx(self._repo, path, workingctx=self,
1334 1334 filelog=filelog)
1335 1335
1336 1336 def dirty(self, missing=False, merge=True, branch=True):
1337 1337 "check whether a working directory is modified"
1338 1338 # check subrepos first
1339 1339 for s in sorted(self.substate):
1340 1340 if self.sub(s).dirty(missing=missing):
1341 1341 return True
1342 1342 # check current working dir
1343 1343 return ((merge and self.p2()) or
1344 1344 (branch and self.branch() != self.p1().branch()) or
1345 1345 self.modified() or self.added() or self.removed() or
1346 1346 (missing and self.deleted()))
1347 1347
1348 1348 def add(self, list, prefix=""):
1349 1349 with self._repo.wlock():
1350 1350 ui, ds = self._repo.ui, self._repo.dirstate
1351 1351 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1352 1352 rejected = []
1353 1353 lstat = self._repo.wvfs.lstat
1354 1354 for f in list:
1355 1355 # ds.pathto() returns an absolute path when this is invoked from
1356 1356 # the keyword extension. That gets flagged as non-portable on
1357 1357 # Windows, since it contains the drive letter and colon.
1358 1358 scmutil.checkportable(ui, os.path.join(prefix, f))
1359 1359 try:
1360 1360 st = lstat(f)
1361 1361 except OSError:
1362 1362 ui.warn(_("%s does not exist!\n") % uipath(f))
1363 1363 rejected.append(f)
1364 1364 continue
1365 1365 limit = ui.configbytes('ui', 'large-file-limit')
1366 1366 if limit != 0 and st.st_size > limit:
1367 1367 ui.warn(_("%s: up to %d MB of RAM may be required "
1368 1368 "to manage this file\n"
1369 1369 "(use 'hg revert %s' to cancel the "
1370 1370 "pending addition)\n")
1371 1371 % (f, 3 * st.st_size // 1000000, uipath(f)))
1372 1372 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1373 1373 ui.warn(_("%s not added: only files and symlinks "
1374 1374 "supported currently\n") % uipath(f))
1375 1375 rejected.append(f)
1376 1376 elif ds[f] in 'amn':
1377 1377 ui.warn(_("%s already tracked!\n") % uipath(f))
1378 1378 elif ds[f] == 'r':
1379 1379 ds.normallookup(f)
1380 1380 else:
1381 1381 ds.add(f)
1382 1382 return rejected
1383 1383
1384 1384 def forget(self, files, prefix=""):
1385 1385 with self._repo.wlock():
1386 1386 ds = self._repo.dirstate
1387 1387 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1388 1388 rejected = []
1389 1389 for f in files:
1390 1390 if f not in ds:
1391 1391 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1392 1392 rejected.append(f)
1393 1393 elif ds[f] != 'a':
1394 1394 ds.remove(f)
1395 1395 else:
1396 1396 ds.drop(f)
1397 1397 return rejected
1398 1398
1399 1399 def copy(self, source, dest):
1400 1400 try:
1401 1401 st = self._repo.wvfs.lstat(dest)
1402 1402 except OSError as err:
1403 1403 if err.errno != errno.ENOENT:
1404 1404 raise
1405 1405 self._repo.ui.warn(_("%s does not exist!\n")
1406 1406 % self._repo.dirstate.pathto(dest))
1407 1407 return
1408 1408 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1409 1409 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1410 1410 "symbolic link\n")
1411 1411 % self._repo.dirstate.pathto(dest))
1412 1412 else:
1413 1413 with self._repo.wlock():
1414 1414 ds = self._repo.dirstate
1415 1415 if ds[dest] in '?':
1416 1416 ds.add(dest)
1417 1417 elif ds[dest] in 'r':
1418 1418 ds.normallookup(dest)
1419 1419 ds.copy(source, dest)
1420 1420
1421 1421 def match(self, pats=None, include=None, exclude=None, default='glob',
1422 1422 listsubrepos=False, badfn=None):
1423 1423 r = self._repo
1424 1424
1425 1425 # Only a case insensitive filesystem needs magic to translate user input
1426 1426 # to actual case in the filesystem.
1427 1427 icasefs = not util.fscasesensitive(r.root)
1428 1428 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1429 1429 default, auditor=r.auditor, ctx=self,
1430 1430 listsubrepos=listsubrepos, badfn=badfn,
1431 1431 icasefs=icasefs)
1432 1432
1433 1433 def _filtersuspectsymlink(self, files):
1434 1434 if not files or self._repo.dirstate._checklink:
1435 1435 return files
1436 1436
1437 1437 # Symlink placeholders may get non-symlink-like contents
1438 1438 # via user error or dereferencing by NFS or Samba servers,
1439 1439 # so we filter out any placeholders that don't look like a
1440 1440 # symlink
1441 1441 sane = []
1442 1442 for f in files:
1443 1443 if self.flags(f) == 'l':
1444 1444 d = self[f].data()
1445 1445 if (d == '' or len(d) >= 1024 or '\n' in d
1446 1446 or stringutil.binary(d)):
1447 1447 self._repo.ui.debug('ignoring suspect symlink placeholder'
1448 1448 ' "%s"\n' % f)
1449 1449 continue
1450 1450 sane.append(f)
1451 1451 return sane
1452 1452
1453 1453 def _checklookup(self, files):
1454 1454 # check for any possibly clean files
1455 1455 if not files:
1456 1456 return [], [], []
1457 1457
1458 1458 modified = []
1459 1459 deleted = []
1460 1460 fixup = []
1461 1461 pctx = self._parents[0]
1462 1462 # do a full compare of any files that might have changed
1463 1463 for f in sorted(files):
1464 1464 try:
1465 1465 # This will return True for a file that got replaced by a
1466 1466 # directory in the interim, but fixing that is pretty hard.
1467 1467 if (f not in pctx or self.flags(f) != pctx.flags(f)
1468 1468 or pctx[f].cmp(self[f])):
1469 1469 modified.append(f)
1470 1470 else:
1471 1471 fixup.append(f)
1472 1472 except (IOError, OSError):
1473 1473 # A file became inaccessible in between? Mark it as deleted,
1474 1474 # matching dirstate behavior (issue5584).
1475 1475 # The dirstate has more complex behavior around whether a
1476 1476 # missing file matches a directory, etc, but we don't need to
1477 1477 # bother with that: if f has made it to this point, we're sure
1478 1478 # it's in the dirstate.
1479 1479 deleted.append(f)
1480 1480
1481 1481 return modified, deleted, fixup
1482 1482
1483 1483 def _poststatusfixup(self, status, fixup):
1484 1484 """update dirstate for files that are actually clean"""
1485 1485 poststatus = self._repo.postdsstatus()
1486 1486 if fixup or poststatus:
1487 1487 try:
1488 1488 oldid = self._repo.dirstate.identity()
1489 1489
1490 1490 # updating the dirstate is optional
1491 1491 # so we don't wait on the lock
1492 1492 # wlock can invalidate the dirstate, so cache normal _after_
1493 1493 # taking the lock
1494 1494 with self._repo.wlock(False):
1495 1495 if self._repo.dirstate.identity() == oldid:
1496 1496 if fixup:
1497 1497 normal = self._repo.dirstate.normal
1498 1498 for f in fixup:
1499 1499 normal(f)
1500 1500 # write changes out explicitly, because nesting
1501 1501 # wlock at runtime may prevent 'wlock.release()'
1502 1502 # after this block from doing so for subsequent
1503 1503 # changing files
1504 1504 tr = self._repo.currenttransaction()
1505 1505 self._repo.dirstate.write(tr)
1506 1506
1507 1507 if poststatus:
1508 1508 for ps in poststatus:
1509 1509 ps(self, status)
1510 1510 else:
1511 1511 # in this case, writing changes out breaks
1512 1512 # consistency, because .hg/dirstate was
1513 1513 # already changed simultaneously after last
1514 1514 # caching (see also issue5584 for detail)
1515 1515 self._repo.ui.debug('skip updating dirstate: '
1516 1516 'identity mismatch\n')
1517 1517 except error.LockError:
1518 1518 pass
1519 1519 finally:
1520 1520 # Even if the wlock couldn't be grabbed, clear out the list.
1521 1521 self._repo.clearpostdsstatus()
1522 1522
1523 1523 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1524 1524 '''Gets the status from the dirstate -- internal use only.'''
1525 1525 subrepos = []
1526 1526 if '.hgsub' in self:
1527 1527 subrepos = sorted(self.substate)
1528 1528 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1529 1529 clean=clean, unknown=unknown)
1530 1530
1531 1531 # check for any possibly clean files
1532 1532 fixup = []
1533 1533 if cmp:
1534 1534 modified2, deleted2, fixup = self._checklookup(cmp)
1535 1535 s.modified.extend(modified2)
1536 1536 s.deleted.extend(deleted2)
1537 1537
1538 1538 if fixup and clean:
1539 1539 s.clean.extend(fixup)
1540 1540
1541 1541 self._poststatusfixup(s, fixup)
1542 1542
1543 1543 if match.always():
1544 1544 # cache for performance
1545 1545 if s.unknown or s.ignored or s.clean:
1546 1546 # "_status" is cached with list*=False in the normal route
1547 1547 self._status = scmutil.status(s.modified, s.added, s.removed,
1548 1548 s.deleted, [], [], [])
1549 1549 else:
1550 1550 self._status = s
1551 1551
1552 1552 return s
1553 1553
1554 1554 @propertycache
1555 1555 def _copies(self):
1556 1556 p1copies = {}
1557 1557 p2copies = {}
1558 1558 parents = self._repo.dirstate.parents()
1559 1559 p1manifest = self._repo[parents[0]].manifest()
1560 1560 p2manifest = self._repo[parents[1]].manifest()
1561 1561 narrowmatch = self._repo.narrowmatch()
1562 1562 for dst, src in self._repo.dirstate.copies().items():
1563 1563 if not narrowmatch(dst):
1564 1564 continue
1565 1565 if src in p1manifest:
1566 1566 p1copies[dst] = src
1567 1567 elif src in p2manifest:
1568 1568 p2copies[dst] = src
1569 1569 return p1copies, p2copies
1570 1570
1571 1571 @propertycache
1572 1572 def _manifest(self):
1573 1573 """generate a manifest corresponding to the values in self._status
1574 1574
1575 1575 This reuses the file nodeid from the parent, but uses special node
1576 1576 identifiers for added and modified files. This is used by manifest
1577 1577 merge to see that files are different and by the update logic to avoid
1578 1578 deleting newly added files.
1579 1579 """
1580 1580 return self._buildstatusmanifest(self._status)
1581 1581
1582 1582 def _buildstatusmanifest(self, status):
1583 1583 """Builds a manifest that includes the given status results."""
1584 1584 parents = self.parents()
1585 1585
1586 1586 man = parents[0].manifest().copy()
1587 1587
1588 1588 ff = self._flagfunc
1589 1589 for i, l in ((addednodeid, status.added),
1590 1590 (modifiednodeid, status.modified)):
1591 1591 for f in l:
1592 1592 man[f] = i
1593 1593 try:
1594 1594 man.setflag(f, ff(f))
1595 1595 except OSError:
1596 1596 pass
1597 1597
1598 1598 for f in status.deleted + status.removed:
1599 1599 if f in man:
1600 1600 del man[f]
1601 1601
1602 1602 return man
1603 1603
1604 1604 def _buildstatus(self, other, s, match, listignored, listclean,
1605 1605 listunknown):
1606 1606 """build a status with respect to another context
1607 1607
1608 1608 This includes logic for maintaining the fast path of status when
1609 1609 comparing the working directory against its parent: skip building a
1610 1610 new manifest when self (the working directory) is being compared
1611 1611 against its parent (repo['.']).
1612 1612 """
1613 1613 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1614 1614 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1615 1615 # might have accidentally ended up with the entire contents of the file
1616 1616 # they are supposed to be linking to.
1617 1617 s.modified[:] = self._filtersuspectsymlink(s.modified)
1618 1618 if other != self._repo['.']:
1619 1619 s = super(workingctx, self)._buildstatus(other, s, match,
1620 1620 listignored, listclean,
1621 1621 listunknown)
1622 1622 return s
1623 1623
1624 1624 def _matchstatus(self, other, match):
1625 1625 """override the match method with a filter for directory patterns
1626 1626
1627 1627 We use inheritance to customize the match.bad method only in cases of
1628 1628 workingctx since it belongs only to the working directory when
1629 1629 comparing against the parent changeset.
1630 1630
1631 1631 If we aren't comparing against the working directory's parent, then we
1632 1632 just use the default match object sent to us.
1633 1633 """
1634 1634 if other != self._repo['.']:
1635 1635 def bad(f, msg):
1636 1636 # 'f' may be a directory pattern from 'match.files()',
1637 1637 # so 'f not in ctx1' is not enough
1638 1638 if f not in other and not other.hasdir(f):
1639 1639 self._repo.ui.warn('%s: %s\n' %
1640 1640 (self._repo.dirstate.pathto(f), msg))
1641 1641 match.bad = bad
1642 1642 return match
1643 1643
1644 1644 def walk(self, match):
1645 1645 '''Generates matching file names.'''
1646 1646 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1647 1647 subrepos=sorted(self.substate),
1648 1648 unknown=True, ignored=False))
1649 1649
1650 1650 def matches(self, match):
1651 1651 match = self._repo.narrowmatch(match)
1652 1652 ds = self._repo.dirstate
1653 1653 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1654 1654
1655 1655 def markcommitted(self, node):
1656 1656 with self._repo.dirstate.parentchange():
1657 1657 for f in self.modified() + self.added():
1658 1658 self._repo.dirstate.normal(f)
1659 1659 for f in self.removed():
1660 1660 self._repo.dirstate.drop(f)
1661 1661 self._repo.dirstate.setparents(node)
1662 1662
1663 1663 # write changes out explicitly, because nesting wlock at
1664 1664 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1665 1665 # from immediately doing so for subsequent changing files
1666 1666 self._repo.dirstate.write(self._repo.currenttransaction())
1667 1667
1668 1668 sparse.aftercommit(self._repo, node)
1669 1669
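# Illustrative usage sketch (assumes an open localrepository ``repo``): the
# status machinery above is normally driven through the workingctx returned
# by repo[None].
def _exampleworkingstatus(repo):
    """Return the dirstate-backed status of the working directory against
    its first parent, including clean files."""
    wctx = repo[None]
    st = wctx.status(listclean=True)
    return st.modified, st.added, st.removed, st.clean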
1670 1670 class committablefilectx(basefilectx):
1671 1671 """A committablefilectx provides common functionality for a file context
1672 1672 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1673 1673 def __init__(self, repo, path, filelog=None, ctx=None):
1674 1674 self._repo = repo
1675 1675 self._path = path
1676 1676 self._changeid = None
1677 1677 self._filerev = self._filenode = None
1678 1678
1679 1679 if filelog is not None:
1680 1680 self._filelog = filelog
1681 1681 if ctx:
1682 1682 self._changectx = ctx
1683 1683
1684 1684 def __nonzero__(self):
1685 1685 return True
1686 1686
1687 1687 __bool__ = __nonzero__
1688 1688
1689 1689 def linkrev(self):
1690 1690 # linked to self._changectx no matter if file is modified or not
1691 1691 return self.rev()
1692 1692
1693 1693 def renamed(self):
1694 1694 path = self.copysource()
1695 1695 if not path:
1696 1696 return None
1697 1697 return path, self._changectx._parents[0]._manifest.get(path, nullid)
1698 1698
1699 1699 def parents(self):
1700 1700 '''return parent filectxs, following copies if necessary'''
1701 1701 def filenode(ctx, path):
1702 1702 return ctx._manifest.get(path, nullid)
1703 1703
1704 1704 path = self._path
1705 1705 fl = self._filelog
1706 1706 pcl = self._changectx._parents
1707 1707 renamed = self.renamed()
1708 1708
1709 1709 if renamed:
1710 1710 pl = [renamed + (None,)]
1711 1711 else:
1712 1712 pl = [(path, filenode(pcl[0], path), fl)]
1713 1713
1714 1714 for pc in pcl[1:]:
1715 1715 pl.append((path, filenode(pc, path), fl))
1716 1716
1717 1717 return [self._parentfilectx(p, fileid=n, filelog=l)
1718 1718 for p, n, l in pl if n != nullid]
1719 1719
1720 1720 def children(self):
1721 1721 return []
1722 1722
1723 1723 class workingfilectx(committablefilectx):
1724 1724 """A workingfilectx object makes access to data related to a particular
1725 1725 file in the working directory convenient."""
1726 1726 def __init__(self, repo, path, filelog=None, workingctx=None):
1727 1727 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1728 1728
1729 1729 @propertycache
1730 1730 def _changectx(self):
1731 1731 return workingctx(self._repo)
1732 1732
1733 1733 def data(self):
1734 1734 return self._repo.wread(self._path)
1735 1735 def copysource(self):
1736 1736 return self._repo.dirstate.copied(self._path)
1737 1737
1738 1738 def size(self):
1739 1739 return self._repo.wvfs.lstat(self._path).st_size
1740 1740 def lstat(self):
1741 1741 return self._repo.wvfs.lstat(self._path)
1742 1742 def date(self):
1743 1743 t, tz = self._changectx.date()
1744 1744 try:
1745 1745 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1746 1746 except OSError as err:
1747 1747 if err.errno != errno.ENOENT:
1748 1748 raise
1749 1749 return (t, tz)
1750 1750
1751 1751 def exists(self):
1752 1752 return self._repo.wvfs.exists(self._path)
1753 1753
1754 1754 def lexists(self):
1755 1755 return self._repo.wvfs.lexists(self._path)
1756 1756
1757 1757 def audit(self):
1758 1758 return self._repo.wvfs.audit(self._path)
1759 1759
1760 1760 def cmp(self, fctx):
1761 1761 """compare with other file context
1762 1762
1763 1763 returns True if different than fctx.
1764 1764 """
1765 1765 # fctx should be a filectx (not a workingfilectx)
1766 1766 # invert comparison to reuse the same code path
1767 1767 return fctx.cmp(self)
1768 1768
1769 1769 def remove(self, ignoremissing=False):
1770 1770 """wraps unlink for a repo's working directory"""
1771 1771 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1772 1772 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1773 1773 rmdir=rmdir)
1774 1774
1775 1775 def write(self, data, flags, backgroundclose=False, **kwargs):
1776 1776 """wraps repo.wwrite"""
1777 1777 return self._repo.wwrite(self._path, data, flags,
1778 1778 backgroundclose=backgroundclose,
1779 1779 **kwargs)
1780 1780
1781 1781 def markcopied(self, src):
1782 1782 """marks this file a copy of `src`"""
1783 1783 self._repo.dirstate.copy(src, self._path)
1784 1784
1785 1785 def clearunknown(self):
1786 1786 """Removes conflicting items in the working directory so that
1787 1787 ``write()`` can be called successfully.
1788 1788 """
1789 1789 wvfs = self._repo.wvfs
1790 1790 f = self._path
1791 1791 wvfs.audit(f)
1792 1792 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1793 1793 # remove files under the directory as they should already be
1794 1794 # warned and backed up
1795 1795 if wvfs.isdir(f) and not wvfs.islink(f):
1796 1796 wvfs.rmtree(f, forcibly=True)
1797 1797 for p in reversed(list(util.finddirs(f))):
1798 1798 if wvfs.isfileorlink(p):
1799 1799 wvfs.unlink(p)
1800 1800 break
1801 1801 else:
1802 1802 # don't remove files if path conflicts are not processed
1803 1803 if wvfs.isdir(f) and not wvfs.islink(f):
1804 1804 wvfs.removedirs(f)
1805 1805
1806 1806 def setflags(self, l, x):
1807 1807 self._repo.wvfs.setflags(self._path, l, x)
1808 1808
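# Illustrative usage sketch (assumes an open localrepository ``repo``; the
# file names b'a.txt' and b'old.txt' are hypothetical): workingfilectx
# objects are normally obtained by indexing the workingctx.
def _exampleworkingfilectx(repo):
    """Read a working-directory file and record a copy through its
    workingfilectx."""
    wctx = repo[None]
    fctx = wctx[b'a.txt']              # a workingfilectx
    data = fctx.data()                 # raw contents from the working copy
    fctx.markcopied(b'old.txt')        # mark it as copied from b'old.txt'
    return data, fctx.copysource()     # copysource() now reports b'old.txt'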
1809 1809 class overlayworkingctx(committablectx):
1810 1810 """Wraps another mutable context with a write-back cache that can be
1811 1811 converted into a commit context.
1812 1812
1813 1813 self._cache[path] maps to a dict with keys: {
1814 1814 'exists': bool?
1815 1815 'date': date?
1816 1816 'data': str?
1817 1817 'flags': str?
1818 1818 'copied': str? (path or None)
1819 1819 }
1820 1820 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1821 1821 is `False`, the file was deleted.
1822 1822 """
1823 1823
1824 1824 def __init__(self, repo):
1825 1825 super(overlayworkingctx, self).__init__(repo)
1826 1826 self.clean()
1827 1827
1828 1828 def setbase(self, wrappedctx):
1829 1829 self._wrappedctx = wrappedctx
1830 1830 self._parents = [wrappedctx]
1831 1831 # Drop old manifest cache as it is now out of date.
1832 1832 # This is necessary when, e.g., rebasing several nodes with one
1833 1833 # ``overlayworkingctx`` (e.g. with --collapse).
1834 1834 util.clearcachedproperty(self, '_manifest')
1835 1835
1836 1836 def data(self, path):
1837 1837 if self.isdirty(path):
1838 1838 if self._cache[path]['exists']:
1839 1839 if self._cache[path]['data'] is not None:
1840 1840 return self._cache[path]['data']
1841 1841 else:
1842 1842 # Must fall back here, too, because we only set flags.
1843 1843 return self._wrappedctx[path].data()
1844 1844 else:
1845 1845 raise error.ProgrammingError("No such file or directory: %s" %
1846 1846 path)
1847 1847 else:
1848 1848 return self._wrappedctx[path].data()
1849 1849
1850 1850 @propertycache
1851 1851 def _manifest(self):
1852 1852 parents = self.parents()
1853 1853 man = parents[0].manifest().copy()
1854 1854
1855 1855 flag = self._flagfunc
1856 1856 for path in self.added():
1857 1857 man[path] = addednodeid
1858 1858 man.setflag(path, flag(path))
1859 1859 for path in self.modified():
1860 1860 man[path] = modifiednodeid
1861 1861 man.setflag(path, flag(path))
1862 1862 for path in self.removed():
1863 1863 del man[path]
1864 1864 return man
1865 1865
1866 1866 @propertycache
1867 1867 def _flagfunc(self):
1868 1868 def f(path):
1869 1869 return self._cache[path]['flags']
1870 1870 return f
1871 1871
1872 1872 def files(self):
1873 1873 return sorted(self.added() + self.modified() + self.removed())
1874 1874
1875 1875 def modified(self):
1876 1876 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1877 1877 self._existsinparent(f)]
1878 1878
1879 1879 def added(self):
1880 1880 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1881 1881 not self._existsinparent(f)]
1882 1882
1883 1883 def removed(self):
1884 1884 return [f for f in self._cache.keys() if
1885 1885 not self._cache[f]['exists'] and self._existsinparent(f)]
1886 1886
1887 1887 def p1copies(self):
1888 1888 copies = self._wrappedctx.p1copies().copy()
1889 1889 narrowmatch = self._repo.narrowmatch()
1890 1890 for f in self._cache.keys():
1891 1891 if not narrowmatch(f):
1892 1892 continue
1893 1893 copies.pop(f, None) # delete if it exists
1894 1894 source = self._cache[f]['copied']
1895 1895 if source:
1896 1896 copies[f] = source
1897 1897 return copies
1898 1898
1899 1899 def p2copies(self):
1900 1900 copies = self._wrappedctx.p2copies().copy()
1901 1901 narrowmatch = self._repo.narrowmatch()
1902 1902 for f in self._cache.keys():
1903 1903 if not narrowmatch(f):
1904 1904 continue
1905 1905 copies.pop(f, None) # delete if it exists
1906 1906 source = self._cache[f]['copied']
1907 1907 if source:
1908 1908 copies[f] = source
1909 1909 return copies
1910 1910
1911 1911 def isinmemory(self):
1912 1912 return True
1913 1913
1914 1914 def filedate(self, path):
1915 1915 if self.isdirty(path):
1916 1916 return self._cache[path]['date']
1917 1917 else:
1918 1918 return self._wrappedctx[path].date()
1919 1919
1920 1920 def markcopied(self, path, origin):
1921 1921 self._markdirty(path, exists=True, date=self.filedate(path),
1922 1922 flags=self.flags(path), copied=origin)
1923 1923
1924 1924 def copydata(self, path):
1925 1925 if self.isdirty(path):
1926 1926 return self._cache[path]['copied']
1927 1927 else:
1928 1928 return None
1929 1929
1930 1930 def flags(self, path):
1931 1931 if self.isdirty(path):
1932 1932 if self._cache[path]['exists']:
1933 1933 return self._cache[path]['flags']
1934 1934 else:
1935 1935 raise error.ProgrammingError("No such file or directory: %s" %
1936 1936 path)
1937 1937 else:
1938 1938 return self._wrappedctx[path].flags()
1939 1939
1940 1940 def __contains__(self, key):
1941 1941 if key in self._cache:
1942 1942 return self._cache[key]['exists']
1943 1943 return key in self.p1()
1944 1944
1945 1945 def _existsinparent(self, path):
1946 1946 try:
1947 1947 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1948 1948 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1949 1949 # with an ``exists()`` function.
1950 1950 self._wrappedctx[path]
1951 1951 return True
1952 1952 except error.ManifestLookupError:
1953 1953 return False
1954 1954
1955 1955 def _auditconflicts(self, path):
1956 1956 """Replicates conflict checks done by wvfs.write().
1957 1957
1958 1958 Since we never write to the filesystem and never call `applyupdates` in
1959 1959 IMM, we'll never check that a path is actually writable -- e.g., because
1960 1960 it adds `a/foo`, but `a` is actually a file in the other commit.
1961 1961 """
1962 1962 def fail(path, component):
1963 1963 # p1() is the base and we're receiving "writes" for p2()'s
1964 1964 # files.
1965 1965 if 'l' in self.p1()[component].flags():
1966 1966 raise error.Abort("error: %s conflicts with symlink %s "
1967 1967 "in %d." % (path, component,
1968 1968 self.p1().rev()))
1969 1969 else:
1970 1970 raise error.Abort("error: '%s' conflicts with file '%s' in "
1971 1971 "%d." % (path, component,
1972 1972 self.p1().rev()))
1973 1973
1974 1974 # Test that each new directory to be created to write this path from p2
1975 1975 # is not a file in p1.
1976 1976 components = path.split('/')
1977 1977 for i in pycompat.xrange(len(components)):
1978 1978 component = "/".join(components[0:i])
1979 1979 if component in self:
1980 1980 fail(path, component)
1981 1981
1982 1982 # Test the other direction -- that this path from p2 isn't a directory
1983 1983 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1984 1984 match = self.match([path], default=b'path')
1985 1985 matches = self.p1().manifest().matches(match)
1986 1986 mfiles = matches.keys()
1987 1987 if len(mfiles) > 0:
1988 1988 if len(mfiles) == 1 and mfiles[0] == path:
1989 1989 return
1990 1990 # omit the files which are deleted in current IMM wctx
1991 1991 mfiles = [m for m in mfiles if m in self]
1992 1992 if not mfiles:
1993 1993 return
1994 1994 raise error.Abort("error: file '%s' cannot be written because "
1995 1995 " '%s/' is a directory in %s (containing %d "
1996 1996 "entries: %s)"
1997 1997 % (path, path, self.p1(), len(mfiles),
1998 1998 ', '.join(mfiles)))
1999 1999
2000 2000 def write(self, path, data, flags='', **kwargs):
2001 2001 if data is None:
2002 2002 raise error.ProgrammingError("data must be non-None")
2003 2003 self._auditconflicts(path)
2004 2004 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
2005 2005 flags=flags)
2006 2006
2007 2007 def setflags(self, path, l, x):
2008 2008 flag = ''
2009 2009 if l:
2010 2010 flag = 'l'
2011 2011 elif x:
2012 2012 flag = 'x'
2013 2013 self._markdirty(path, exists=True, date=dateutil.makedate(),
2014 2014 flags=flag)
2015 2015
2016 2016 def remove(self, path):
2017 2017 self._markdirty(path, exists=False)
2018 2018
2019 2019 def exists(self, path):
2020 2020 """exists behaves like `lexists`, but needs to follow symlinks and
2021 2021 return False if they are broken.
2022 2022 """
2023 2023 if self.isdirty(path):
2024 2024 # If this path exists and is a symlink, "follow" it by calling
2025 2025 # exists on the destination path.
2026 2026 if (self._cache[path]['exists'] and
2027 2027 'l' in self._cache[path]['flags']):
2028 2028 return self.exists(self._cache[path]['data'].strip())
2029 2029 else:
2030 2030 return self._cache[path]['exists']
2031 2031
2032 2032 return self._existsinparent(path)
2033 2033
2034 2034 def lexists(self, path):
2035 2035 """lexists returns True if the path exists"""
2036 2036 if self.isdirty(path):
2037 2037 return self._cache[path]['exists']
2038 2038
2039 2039 return self._existsinparent(path)
2040 2040
2041 2041 def size(self, path):
2042 2042 if self.isdirty(path):
2043 2043 if self._cache[path]['exists']:
2044 2044 return len(self._cache[path]['data'])
2045 2045 else:
2046 2046 raise error.ProgrammingError("No such file or directory: %s" %
2047 2047 path)
2048 2048 return self._wrappedctx[path].size()
2049 2049
2050 2050 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2051 2051 user=None, editor=None):
2052 2052 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2053 2053 committed.
2054 2054
2055 2055 ``text`` is the commit message.
2056 2056 ``parents`` (optional) are rev numbers.
2057 2057 """
2058 2058 # Default parents to the wrapped context's if not passed.
2059 2059 if parents is None:
2060 2060 parents = self._wrappedctx.parents()
2061 2061 if len(parents) == 1:
2062 2062 parents = (parents[0], None)
2063 2063
2064 2064 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2065 2065 if parents[1] is None:
2066 2066 parents = (self._repo[parents[0]], None)
2067 2067 else:
2068 2068 parents = (self._repo[parents[0]], self._repo[parents[1]])
2069 2069
2070 2070 files = self.files()
2071 2071 def getfile(repo, memctx, path):
2072 2072 if self._cache[path]['exists']:
2073 2073 return memfilectx(repo, memctx, path,
2074 2074 self._cache[path]['data'],
2075 2075 'l' in self._cache[path]['flags'],
2076 2076 'x' in self._cache[path]['flags'],
2077 2077 self._cache[path]['copied'])
2078 2078 else:
2079 2079 # Returning None, but including the path in `files`, is
2080 2080 # necessary for memctx to register a deletion.
2081 2081 return None
2082 2082 return memctx(self._repo, parents, text, files, getfile, date=date,
2083 2083 extra=extra, user=user, branch=branch, editor=editor)
2084 2084
2085 2085 def isdirty(self, path):
2086 2086 return path in self._cache
2087 2087
2088 2088 def isempty(self):
2089 2089 # We need to discard any keys that are actually clean before the empty
2090 2090 # commit check.
2091 2091 self._compact()
2092 2092 return len(self._cache) == 0
2093 2093
2094 2094 def clean(self):
2095 2095 self._cache = {}
2096 2096
2097 2097 def _compact(self):
2098 2098 """Removes keys from the cache that are actually clean, by comparing
2099 2099 them with the underlying context.
2100 2100
2101 2101 This can occur during the merge process, e.g. by passing --tool :local
2102 2102 to resolve a conflict.
2103 2103 """
2104 2104 keys = []
2105 2105 # This won't be perfect, but can help performance significantly when
2106 2106 # using things like remotefilelog.
2107 2107 scmutil.prefetchfiles(
2108 2108 self.repo(), [self.p1().rev()],
2109 2109 scmutil.matchfiles(self.repo(), self._cache.keys()))
2110 2110
2111 2111 for path in self._cache.keys():
2112 2112 cache = self._cache[path]
2113 2113 try:
2114 2114 underlying = self._wrappedctx[path]
2115 2115 if (underlying.data() == cache['data'] and
2116 2116 underlying.flags() == cache['flags']):
2117 2117 keys.append(path)
2118 2118 except error.ManifestLookupError:
2119 2119 # Path not in the underlying manifest (created).
2120 2120 continue
2121 2121
2122 2122 for path in keys:
2123 2123 del self._cache[path]
2124 2124 return keys
2125 2125
2126 2126 def _markdirty(self, path, exists, data=None, date=None, flags='',
2127 2127 copied=None):
2128 2128 # data not provided, let's see if we already have some; if not, let's
2129 2129 # grab it from our underlying context, so that we always have data if
2130 2130 # the file is marked as existing.
2131 2131 if exists and data is None:
2132 2132 oldentry = self._cache.get(path) or {}
2133 2133 data = oldentry.get('data')
2134 2134 if data is None:
2135 2135 data = self._wrappedctx[path].data()
2136 2136
2137 2137 self._cache[path] = {
2138 2138 'exists': exists,
2139 2139 'data': data,
2140 2140 'date': date,
2141 2141 'flags': flags,
2142 2142 'copied': copied,
2143 2143 }
2144 2144
2145 2145 def filectx(self, path, filelog=None):
2146 2146 return overlayworkingfilectx(self._repo, path, parent=self,
2147 2147 filelog=filelog)
2148 2148
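# Illustrative usage sketch (assumes an open localrepository ``repo``; file
# name and commit message are hypothetical): this is roughly how in-memory
# rebase drives an overlayworkingctx.
def _exampleoverlayworkingcommit(repo):
    """Write a file into an overlayworkingctx based on the working copy
    parent and turn the result into a real commit."""
    wctx = overlayworkingctx(repo)
    wctx.setbase(repo['.'])                      # overlay on top of repo['.']
    wctx.write(b'a.txt', b'contents written only in memory\n')
    mctx = wctx.tomemctx(b'commit created from an overlayworkingctx')
    return repo.commitctx(mctx)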
2149 2149 class overlayworkingfilectx(committablefilectx):
2150 2150 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2151 2151 cache, which can be flushed through later by calling ``flush()``."""
2152 2152
2153 2153 def __init__(self, repo, path, filelog=None, parent=None):
2154 2154 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2155 2155 parent)
2156 2156 self._repo = repo
2157 2157 self._parent = parent
2158 2158 self._path = path
2159 2159
2160 2160 def cmp(self, fctx):
2161 2161 return self.data() != fctx.data()
2162 2162
2163 2163 def changectx(self):
2164 2164 return self._parent
2165 2165
2166 2166 def data(self):
2167 2167 return self._parent.data(self._path)
2168 2168
2169 2169 def date(self):
2170 2170 return self._parent.filedate(self._path)
2171 2171
2172 2172 def exists(self):
2173 2173 return self.lexists()
2174 2174
2175 2175 def lexists(self):
2176 2176 return self._parent.exists(self._path)
2177 2177
2178 2178 def copysource(self):
2179 2179 return self._parent.copydata(self._path)
2180 2180
2181 2181 def size(self):
2182 2182 return self._parent.size(self._path)
2183 2183
2184 2184 def markcopied(self, origin):
2185 2185 self._parent.markcopied(self._path, origin)
2186 2186
2187 2187 def audit(self):
2188 2188 pass
2189 2189
2190 2190 def flags(self):
2191 2191 return self._parent.flags(self._path)
2192 2192
2193 2193 def setflags(self, islink, isexec):
2194 2194 return self._parent.setflags(self._path, islink, isexec)
2195 2195
2196 2196 def write(self, data, flags, backgroundclose=False, **kwargs):
2197 2197 return self._parent.write(self._path, data, flags, **kwargs)
2198 2198
2199 2199 def remove(self, ignoremissing=False):
2200 2200 return self._parent.remove(self._path)
2201 2201
2202 2202 def clearunknown(self):
2203 2203 pass
2204 2204
2205 2205 class workingcommitctx(workingctx):
2206 2206 """A workingcommitctx object makes access to data related to
2207 2207 the revision being committed convenient.
2208 2208
2209 2209 This hides changes in the working directory, if they aren't
2210 2210 committed in this context.
2211 2211 """
2212 2212 def __init__(self, repo, changes,
2213 2213 text="", user=None, date=None, extra=None):
2214 2214 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2215 2215 changes)
2216 2216
2217 2217 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2218 2218 """Return matched files only in ``self._status``
2219 2219
2220 2220 Uncommitted files appear "clean" via this context, even if
2221 2221 they aren't actually so in the working directory.
2222 2222 """
2223 2223 if clean:
2224 2224 clean = [f for f in self._manifest if f not in self._changedset]
2225 2225 else:
2226 2226 clean = []
2227 2227 return scmutil.status([f for f in self._status.modified if match(f)],
2228 2228 [f for f in self._status.added if match(f)],
2229 2229 [f for f in self._status.removed if match(f)],
2230 2230 [], [], [], clean)
2231 2231
2232 2232 @propertycache
2233 2233 def _changedset(self):
2234 2234 """Return the set of files changed in this context
2235 2235 """
2236 2236 changed = set(self._status.modified)
2237 2237 changed.update(self._status.added)
2238 2238 changed.update(self._status.removed)
2239 2239 return changed
2240 2240
2241 2241 def makecachingfilectxfn(func):
2242 2242 """Create a filectxfn that caches based on the path.
2243 2243
2244 2244 We can't use util.cachefunc because it uses all arguments as the cache
2245 2245 key and this creates a cycle since the arguments include the repo and
2246 2246 memctx.
2247 2247 """
2248 2248 cache = {}
2249 2249
2250 2250 def getfilectx(repo, memctx, path):
2251 2251 if path not in cache:
2252 2252 cache[path] = func(repo, memctx, path)
2253 2253 return cache[path]
2254 2254
2255 2255 return getfilectx
2256 2256
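# Illustrative usage sketch (assumes ``ctx`` is any existing changectx): a
# filectxfn wrapped by makecachingfilectxfn() materializes each path at most
# once, which helps in e.g. convert scenarios.
def _examplecachedfilectxfn(ctx):
    """Build a caching filectxfn that serves file data from ``ctx``."""
    def expensive(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec())
    return makecachingfilectxfn(expensive)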
2257 2257 def memfilefromctx(ctx):
2258 2258 """Given a context return a memfilectx for ctx[path]
2259 2259
2260 2260 This is a convenience method for building a memctx based on another
2261 2261 context.
2262 2262 """
2263 2263 def getfilectx(repo, memctx, path):
2264 2264 fctx = ctx[path]
2265 2265 copysource = fctx.copysource()
2266 2266 return memfilectx(repo, memctx, path, fctx.data(),
2267 2267 islink=fctx.islink(), isexec=fctx.isexec(),
2268 2268 copysource=copysource)
2269 2269
2270 2270 return getfilectx
2271 2271
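# Illustrative usage sketch (assumes an open localrepository ``repo`` and an
# existing changectx ``ctx``): memfilefromctx() makes it easy to rebuild a
# changeset in memory from another context.
def _examplememctxfromctx(repo, ctx):
    """Recreate ``ctx`` as an uncommitted memctx using its own file data."""
    getfilectx = memfilefromctx(ctx)
    parents = [p.node() for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(None)
    return memctx(repo, parents, ctx.description(), ctx.files(), getfilectx,
                  user=ctx.user(), date=ctx.date(), extra=ctx.extra())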
2272 2272 def memfilefrompatch(patchstore):
2273 2273 """Given a patch (e.g. patchstore object) return a memfilectx
2274 2274
2275 2275 This is a convenience method for building a memctx based on a patchstore.
2276 2276 """
2277 2277 def getfilectx(repo, memctx, path):
2278 2278 data, mode, copysource = patchstore.getfile(path)
2279 2279 if data is None:
2280 2280 return None
2281 2281 islink, isexec = mode
2282 2282 return memfilectx(repo, memctx, path, data, islink=islink,
2283 2283 isexec=isexec, copysource=copysource)
2284 2284
2285 2285 return getfilectx
2286 2286
2287 2287 class memctx(committablectx):
2288 2288 """Use memctx to perform in-memory commits via localrepo.commitctx().
2289 2289
2290 2290 Revision information is supplied at initialization time, while
2291 2291 related file data is made available through a callback
2292 2292 mechanism. 'repo' is the current localrepo, 'parents' is a
2293 2293 sequence of two parent revision identifiers (pass None for every
2294 2294 missing parent), 'text' is the commit message and 'files' lists
2295 2295 names of files touched by the revision (normalized and relative to
2296 2296 repository root).
2297 2297
2298 2298 filectxfn(repo, memctx, path) is a callable receiving the
2299 2299 repository, the current memctx object and the normalized path of
2300 2300 requested file, relative to repository root. It is fired by the
2301 2301 commit function for every file in 'files', but calls order is
2302 2302 undefined. If the file is available in the revision being
2303 2303 committed (updated or added), filectxfn returns a memfilectx
2304 2304 object. If the file was removed, filectxfn returns None for recent
2305 2305 Mercurial. Moved files are represented by marking the source file
2306 2306 removed and the new file added with copy information (see
2307 2307 memfilectx).
2308 2308
2309 2309 user receives the committer name and defaults to current
2310 2310 repository username, date is the commit date in any format
2311 2311 supported by dateutil.parsedate() and defaults to current date, extra
2312 2312 is a dictionary of metadata or is left empty.
2313 2313 """
2314 2314
2315 2315 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2316 2316 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2317 2317 # this field to determine what to do in filectxfn.
2318 2318 _returnnoneformissingfiles = True
2319 2319
2320 2320 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2321 2321 date=None, extra=None, branch=None, editor=False):
2322 2322 super(memctx, self).__init__(repo, text, user, date, extra,
2323 2323 branch=branch)
2324 2324 self._rev = None
2325 2325 self._node = None
2326 2326 parents = [(p or nullid) for p in parents]
2327 2327 p1, p2 = parents
2328 2328 self._parents = [self._repo[p] for p in (p1, p2)]
2329 2329 files = sorted(set(files))
2330 2330 self._files = files
2331 2331 self.substate = {}
2332 2332
2333 2333 if isinstance(filectxfn, patch.filestore):
2334 2334 filectxfn = memfilefrompatch(filectxfn)
2335 2335 elif not callable(filectxfn):
2336 2336 # if store is not callable, wrap it in a function
2337 2337 filectxfn = memfilefromctx(filectxfn)
2338 2338
2339 2339 # memoizing increases performance for e.g. vcs convert scenarios.
2340 2340 self._filectxfn = makecachingfilectxfn(filectxfn)
2341 2341
2342 2342 if editor:
2343 2343 self._text = editor(self._repo, self, [])
2344 2344 self._repo.savecommitmessage(self._text)
2345 2345
2346 2346 def filectx(self, path, filelog=None):
2347 2347 """get a file context from the working directory
2348 2348
2349 2349 Returns None if file doesn't exist and should be removed."""
2350 2350 return self._filectxfn(self._repo, self, path)
2351 2351
2352 2352 def commit(self):
2353 2353 """commit context to the repo"""
2354 2354 return self._repo.commitctx(self)
2355 2355
2356 2356 @propertycache
2357 2357 def _manifest(self):
2358 2358 """generate a manifest based on the return values of filectxfn"""
2359 2359
2360 2360 # keep this simple for now; just worry about p1
2361 2361 pctx = self._parents[0]
2362 2362 man = pctx.manifest().copy()
2363 2363
2364 2364 for f in self._status.modified:
2365 2365 man[f] = modifiednodeid
2366 2366
2367 2367 for f in self._status.added:
2368 2368 man[f] = addednodeid
2369 2369
2370 2370 for f in self._status.removed:
2371 2371 if f in man:
2372 2372 del man[f]
2373 2373
2374 2374 return man
2375 2375
2376 2376 @propertycache
2377 2377 def _status(self):
2378 2378 """Calculate exact status from ``files`` specified at construction
2379 2379 """
2380 2380 man1 = self.p1().manifest()
2381 2381 p2 = self._parents[1]
2382 2382 # "1 < len(self._parents)" can't be used for checking
2383 2383 # existence of the 2nd parent, because "memctx._parents" is
2384 2384 # explicitly initialized with a list whose length is 2.
2385 2385 if p2.node() != nullid:
2386 2386 man2 = p2.manifest()
2387 2387 managing = lambda f: f in man1 or f in man2
2388 2388 else:
2389 2389 managing = lambda f: f in man1
2390 2390
2391 2391 modified, added, removed = [], [], []
2392 2392 for f in self._files:
2393 2393 if not managing(f):
2394 2394 added.append(f)
2395 2395 elif self[f]:
2396 2396 modified.append(f)
2397 2397 else:
2398 2398 removed.append(f)
2399 2399
2400 2400 return scmutil.status(modified, added, removed, [], [], [], [])
2401 2401
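# Illustrative usage sketch (assumes an open localrepository ``repo``; the
# file names, contents and user are hypothetical): committing entirely in
# memory with memctx and memfilectx.
def _examplememcommit(repo):
    """Create a commit on top of repo['.'] without touching the working
    directory."""
    def getfilectx(repo, memctx, path):
        if path == b'removed.txt':
            return None                       # None marks the file as removed
        return memfilectx(repo, memctx, path, b'new contents\n')
    parents = (repo['.'].node(), None)
    ctx = memctx(repo, parents, b'commit built in memory',
                 [b'added.txt', b'removed.txt'], getfilectx,
                 user=b'example user <user@example.com>')
    return ctx.commit()                       # returns the new changeset id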
2402 2402 class memfilectx(committablefilectx):
2403 2403 """memfilectx represents an in-memory file to commit.
2404 2404
2405 2405 See memctx and committablefilectx for more details.
2406 2406 """
2407 2407 def __init__(self, repo, changectx, path, data, islink=False,
2408 2408 isexec=False, copysource=None):
2409 2409 """
2410 2410 path is the normalized file path relative to repository root.
2411 2411 data is the file content as a string.
2412 2412 islink is True if the file is a symbolic link.
2413 2413 isexec is True if the file is executable.
2414 2414 copied is the source file path if current file was copied in the
2415 2415 revision being committed, or None."""
2416 2416 super(memfilectx, self).__init__(repo, path, None, changectx)
2417 2417 self._data = data
2418 2418 if islink:
2419 2419 self._flags = 'l'
2420 2420 elif isexec:
2421 2421 self._flags = 'x'
2422 2422 else:
2423 2423 self._flags = ''
2424 2424 self._copysource = copysource
2425 2425
2426 2426 def copysource(self):
2427 2427 return self._copysource
2428 2428
2429 2429 def cmp(self, fctx):
2430 2430 return self.data() != fctx.data()
2431 2431
2432 2432 def data(self):
2433 2433 return self._data
2434 2434
2435 2435 def remove(self, ignoremissing=False):
2436 2436 """wraps unlink for a repo's working directory"""
2437 2437 # need to figure out what to do here
2438 2438 del self._changectx[self._path]
2439 2439
2440 2440 def write(self, data, flags, **kwargs):
2441 2441 """wraps repo.wwrite"""
2442 2442 self._data = data
2443 2443
2444 2444
2445 2445 class metadataonlyctx(committablectx):
2446 2446 """Like memctx but it's reusing the manifest of different commit.
2447 2447 Intended to be used by lightweight operations that are creating
2448 2448 metadata-only changes.
2449 2449
2450 2450 Revision information is supplied at initialization time. 'repo' is the
2451 2451 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2452 2452 'parents' is a sequence of two parent revision identifiers (pass None for
2453 2453 every missing parent), 'text' is the commit message.
2454 2454
2455 2455 user receives the committer name and defaults to current repository
2456 2456 username, date is the commit date in any format supported by
2457 2457 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2458 2458 metadata or is left empty.
2459 2459 """
2460 2460 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2461 2461 date=None, extra=None, editor=False):
2462 2462 if text is None:
2463 2463 text = originalctx.description()
2464 2464 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2465 2465 self._rev = None
2466 2466 self._node = None
2467 2467 self._originalctx = originalctx
2468 2468 self._manifestnode = originalctx.manifestnode()
2469 2469 if parents is None:
2470 2470 parents = originalctx.parents()
2471 2471 else:
2472 2472 parents = [repo[p] for p in parents if p is not None]
2473 2473 parents = parents[:]
2474 2474 while len(parents) < 2:
2475 2475 parents.append(repo[nullid])
2476 2476 p1, p2 = self._parents = parents
2477 2477
2478 2478 # sanity check to ensure that the reused manifest parents are
2479 2479 # manifests of our commit parents
2480 2480 mp1, mp2 = self.manifestctx().parents
2481 2481 if p1 != nullid and p1.manifestnode() != mp1:
2482 2482 raise RuntimeError(r"can't reuse the manifest: its p1 "
2483 2483 r"doesn't match the new ctx p1")
2484 2484 if p2 != nullid and p2.manifestnode() != mp2:
2485 2485 raise RuntimeError(r"can't reuse the manifest: "
2486 2486 r"its p2 doesn't match the new ctx p2")
2487 2487
2488 2488 self._files = originalctx.files()
2489 2489 self.substate = {}
2490 2490
2491 2491 if editor:
2492 2492 self._text = editor(self._repo, self, [])
2493 2493 self._repo.savecommitmessage(self._text)
2494 2494
2495 2495 def manifestnode(self):
2496 2496 return self._manifestnode
2497 2497
2498 2498 @property
2499 2499 def _manifestctx(self):
2500 2500 return self._repo.manifestlog[self._manifestnode]
2501 2501
2502 2502 def filectx(self, path, filelog=None):
2503 2503 return self._originalctx.filectx(path, filelog=filelog)
2504 2504
2505 2505 def commit(self):
2506 2506 """commit context to the repo"""
2507 2507 return self._repo.commitctx(self)
2508 2508
2509 2509 @property
2510 2510 def _manifest(self):
2511 2511 return self._originalctx.manifest()
2512 2512
2513 2513 @propertycache
2514 2514 def _status(self):
2515 2515 """Calculate exact status from ``files`` specified in the ``origctx``
2516 2516 and parents manifests.
2517 2517 """
2518 2518 man1 = self.p1().manifest()
2519 2519 p2 = self._parents[1]
2520 2520 # "1 < len(self._parents)" can't be used for checking
2521 2521 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2522 2522 # explicitly initialized with a list whose length is 2.
2523 2523 if p2.node() != nullid:
2524 2524 man2 = p2.manifest()
2525 2525 managing = lambda f: f in man1 or f in man2
2526 2526 else:
2527 2527 managing = lambda f: f in man1
2528 2528
2529 2529 modified, added, removed = [], [], []
2530 2530 for f in self._files:
2531 2531 if not managing(f):
2532 2532 added.append(f)
2533 2533 elif f in self:
2534 2534 modified.append(f)
2535 2535 else:
2536 2536 removed.append(f)
2537 2537
2538 2538 return scmutil.status(modified, added, removed, [], [], [], [])
2539 2539
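# Illustrative usage sketch (assumes an open localrepository ``repo`` and a
# revision identifier ``rev``): rewriting only the commit message while
# reusing the original manifest.
def _examplemetadataonlyrewrite(repo, rev, newtext):
    """Create a sibling of ``rev`` that reuses its manifest but carries a
    different description."""
    origctx = repo[rev]
    newctx = metadataonlyctx(repo, origctx, text=newtext,
                             user=origctx.user(), date=origctx.date(),
                             extra=origctx.extra())
    return newctx.commit()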
2540 2540 class arbitraryfilectx(object):
2541 2541 """Allows you to use filectx-like functions on a file in an arbitrary
2542 2542 location on disk, possibly not in the working directory.
2543 2543 """
2544 2544 def __init__(self, path, repo=None):
2545 2545 # Repo is optional because contrib/simplemerge uses this class.
2546 2546 self._repo = repo
2547 2547 self._path = path
2548 2548
2549 2549 def cmp(self, fctx):
2550 2550 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2551 2551 # path if either side is a symlink.
2552 2552 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2553 2553 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2554 2554 # Add a fast-path for merge if both sides are disk-backed.
2555 2555 # Note that filecmp uses the opposite return values (True if same)
2556 2556 # from our cmp functions (True if different).
2557 2557 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2558 2558 return self.data() != fctx.data()
2559 2559
2560 2560 def path(self):
2561 2561 return self._path
2562 2562
2563 2563 def flags(self):
2564 2564 return ''
2565 2565
2566 2566 def data(self):
2567 2567 return util.readfile(self._path)
2568 2568
2569 2569 def decodeddata(self):
2570 2570 with open(self._path, "rb") as f:
2571 2571 return f.read()
2572 2572
2573 2573 def remove(self):
2574 2574 util.unlink(self._path)
2575 2575
2576 2576 def write(self, data, flags, **kwargs):
2577 2577 assert not flags
2578 2578 with open(self._path, "wb") as f:
2579 2579 f.write(data)
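# Illustrative usage sketch (assumes an open localrepository ``repo``; the
# paths are hypothetical): comparing an arbitrary on-disk file with a file
# from the working directory, as contrib/simplemerge does.
def _examplearbitrarycmp(repo, diskpath, wdirpath):
    """Return True if the file at ``diskpath`` differs from ``wdirpath`` in
    the working directory."""
    afctx = arbitraryfilectx(diskpath, repo=repo)
    wfctx = repo[None][wdirpath]
    return afctx.cmp(wfctx)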