##// END OF EJS Templates
context: let caller pass in branch to committablectx.__init__()...
Martin von Zweigbergk -
r42482:df2f22be default
parent child Browse files
Show More
@@ -1,2569 +1,2570 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex form of the node id, used when displaying a context
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are of the same concrete type and
        # refer to the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' tests whether the file exists in this revision
        return key in self._manifest

    def __getitem__(self, key):
        # 'ctx[path]' returns the file context for that path
        return self.filectx(key)

    def __iter__(self):
        # iterate over the file names tracked in this revision
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # mf1.diff() reported the file clean explicitly
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed subrepo state for this revision
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        """return the stored revision of the subrepo at subpath"""
        return self.substate[subpath][1]

    def rev(self):
        """return the revision number of this context"""
        return self._rev
    def node(self):
        """return the binary node id of this context"""
        return self._node
    def hex(self):
        """return the hex node id of this context"""
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        """return the name of this context's phase"""
        return phases.phasenames[self.phase()]
    def mutable(self):
        """True if the changeset is in a non-public phase"""
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        """return a matcher for the given fileset expression"""
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
          - orphan,
          - phase-divergent,
          - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """return the context of the first parent"""
        return self._parents[0]

    def p2(self):
        """return the context of the second parent, or the nullrev context
        if there is no second parent"""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """return (filenode, flags) for path in this revision

        Raises ManifestLookupError if the file is not in the manifest.
        Prefers already-cached manifest data over a full manifest read.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        """return the filenode of path in this revision"""
        return self._fileinfo(path)[0]

    def flags(self, path):
        # a missing file is reported as having no flags rather than erroring
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    @propertycache
    def _copies(self):
        # Compute {dst: src} copy mappings for this context, split by the
        # parent the copy source belongs to, from filelog rename metadata.
        p1copies = {}
        p2copies = {}
        p1 = self.p1()
        p2 = self.p2()
        narrowmatch = self._repo.narrowmatch()
        for dst in self.files():
            if not narrowmatch(dst) or dst not in self:
                continue
            copied = self[dst].renamed()
            if not copied:
                continue
            src, srcnode = copied
            if src in p1 and p1[src].filenode() == srcnode:
                p1copies[dst] = src
            elif src in p2 and p2[src].filenode() == srcnode:
                p2copies[dst] = src
        return p1copies, p2copies
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher built from the given patterns, rooted at the
        repository root"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, pathfn=None, copy=None,
             copysourcematch=None, hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default to diffing against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, pathfn=pathfn,
                          copy=copy, copysourcematch=copysourcematch,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # the null revision is the only falsy changectx
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # parsed changelog entry for this revision
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # manifest read as a delta (cheaper than a full read)
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # one or two parent contexts; a null second parent is omitted
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        """return the raw changeset fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        """return the manifest node id recorded by this changeset"""
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    @propertycache
    def _copies(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if (source == 'changeset-only' or
            (source == 'compatibility' and p1copies is not None)):
            return p1copies or {}, p2copies or {}

        # Otherwise (config said to read only from filelog, or we are in
        # compatiblity mode and there is not data in the changeset), we get
        # the copy metadata from the filelogs.
        return super(changectx, self)._copies
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        # branch-closing changesets record a 'close' key in extra
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # hidden means filtered out of the 'visible' repo view
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        """yield a context for every ancestor of this changeset"""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
592 592
593 593 class basefilectx(object):
594 594 """A filecontext object represents the common logic for its children:
595 595 filectx: read-only access to a filerevision that is already present
596 596 in the repo,
597 597 workingfilectx: a filecontext that represents files from the working
598 598 directory,
599 599 memfilectx: a filecontext that represents files in-memory,
600 600 """
    @propertycache
    def _filelog(self):
        # filelog (revlog) holding every revision of this file
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # changelog revision this file context is associated with
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # binary node id of this file revision
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # revision number of this file revision within its filelog
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
630 630
    def __nonzero__(self):
        # a filectx is truthy only when its file revision actually exists
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # file contexts are equal when they are of the same concrete type and
        # point at the same file revision (path + filenode)
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
667 667
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        # changelog revision this file context belongs to
        return self._changeid
    def linkrev(self):
        # raw linkrev recorded in the filelog (see _adjustlinkrev for why
        # this may not point to an ancestor of the originating changeset)
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # (source path, source filenode) when this file revision is a
        # copy/rename, otherwise a falsy value
        return self._copied
    def copysource(self):
        return self._copied and self._copied[0]
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
739 739
    # subclasses that implement their own comparison set this to True so that
    # cmp() below defers to them
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
768 768
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the source revision is the linkrev itself
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # hit the floor without finding the introduction
                    return None
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
822 822
823 823 def isintroducedafter(self, changelogrev):
824 824 """True if a filectx has been introduced after a given floor revision
825 825 """
826 826 if self.linkrev() >= changelogrev:
827 827 return True
828 828 introrev = self._introrev(stoprev=changelogrev)
829 829 if introrev is None:
830 830 return False
831 831 return introrev >= changelogrev
832 832
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no associated changeset at all: fall back to the raw linkrev
            return self.linkrev()
872 872
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already pointing at the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
896 896
    def parents(self):
        """return the parent filectxs of this file revision

        Rename metadata, when present, replaces the first null parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        """return the first parent filectx"""
        return self.parents()[0]

    def p2(self):
        """return the second parent filectx, or a fileid=-1 filectx when
        there is no second parent"""
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925 925
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)

        follow: when False, parents reached through a rename are filtered
        out, so annotation stops at the rename point.
        skiprevs/diffopts are passed through to dagop.annotate().
        """
        # per-path filelog cache shared by all parent lookups below
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
971 971
972 972 def ancestors(self, followfirst=False):
973 973 visit = {}
974 974 c = self
975 975 if followfirst:
976 976 cut = 1
977 977 else:
978 978 cut = None
979 979
980 980 while True:
981 981 for parent in c.parents()[:cut]:
982 982 visit[(parent.linkrev(), parent.filenode())] = parent
983 983 if not visit:
984 984 break
985 985 c = visit.pop(max(visit))
986 986 yield c
987 987
988 988 def decodeddata(self):
989 989 """Returns `data()` after running repository decoding filters.
990 990
991 991 This is often equivalent to how the data would be expressed on disk.
992 992 """
993 993 return self._repo.wwritedata(self.path(), self.data())
994 994
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only seed the propertycaches for values that were actually given
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw=True: stored revision data without flag processing applied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, aborting (or returning '') on a
        censored node depending on the censor.policy config."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # same file revision present in a parent: not a copy here
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1100 1100
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        # text: commit message
        # user: username string, or None to use ui.username() lazily
        # date: any parsable date, or None to use a lazily computed default
        # extra: dict of extra commit metadata, copied if given
        # changes: precomputed status; otherwise computed lazily via _status
        # branch: named branch for the commit; when given it overrides both
        #   extra['branch'] and the dirstate branch
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        elif 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # "<p1>+" marks this as an uncommitted (working/memory) context
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # lazily computed when 'changes' was not passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        # parents first, then all changelog ancestors of the parents
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # no-op here; subclasses (e.g. workingctx) override as needed

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1231 1233
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # yield tracked files; 'r' means scheduled for removal
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked and not removed ('?' = unknown, 'r' = removed)
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        # the working directory has a fixed pseudo-node
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for path in the working dir."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition; returns rejected paths."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files; returns rejected paths."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    # file was only added, never committed: just drop it
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        """Record that dest is a copy of source in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """Full-content compare files whose status is ambiguous.

        Returns (modified, deleted, fixup) where fixup lists files that
        turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        # split dirstate copy records by which parent provides the source
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def markcommitted(self, node):
        # mark the working copy's pending changes as committed under 'node'
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1661 1663
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # ctx: the owning committable changectx (workingctx/memctx), if known
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file is a copy,
        else None."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # rename info replaces the first parent; no filelog known yet
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1714 1716
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # prefer the file's mtime; fall back to the changectx date if the
        # file has disappeared from disk
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only for tracked states (normal/modified/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1799 1801
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or reset) the context this overlay sits on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return the content of ``path``, preferring the in-memory cache."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # start from the base context's manifest and apply cached changes
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached, existing, and also present in the base context
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # cached, existing, but absent from the base context
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # cached as deleted, but present in the base context
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def _overlaidcopies(self, copies):
        """Overlay in-memory copy records onto ``copies``.

        ``copies`` is a mutable dict (already copied from the wrapped
        context); cached entries override it, honoring the narrow matcher.
        """
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p1copies(self):
        # ask the wrapped *context* for its copies; the repo object has no
        # ``_wrappedctx`` attribute
        return self._overlaidcopies(self._wrappedctx.p1copies().copy())

    def p2copies(self):
        return self._overlaidcopies(self._wrappedctx.p2copies().copy())

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` was copied from ``origin``."""
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # report the requested path; the ctx has no ``_path``
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        # 'l' (symlink) takes precedence over 'x' (executable)
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # report the requested path; the ctx has no ``_path``
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2137 2139
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; every operation below
        # simply delegates to it with this file's path
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if contents differ
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # in-memory files require no filesystem path auditing
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # nothing on disk can conflict with an in-memory file
        pass
2193 2195
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything in the manifest not touched by this commit is clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2229 2231
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: after warm-up the common case is a cache hit
        try:
            return cache[path]
        except KeyError:
            cache[path] = func(repo, memctx, path)
            return cache[path]

    return getfilectx
2245 2247
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        # reuse content, flags and copy metadata from the source context
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copysource=copysource)

    return getfilectx
2260 2262
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch deletes this file; memctx treats None as removal
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copysource=copysource)

    return getfilectx
2275 2277
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        # branch handling is delegated to committablectx.__init__()
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        # missing parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # in neither parent: an addition
                added.append(f)
            elif self[f]:
                # filectxfn returned a filectx: content change
                modified.append(f)
            else:
                # filectxfn returned None: a removal
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2391 2392
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # the symlink flag takes precedence over the exec bit
        self._flags = 'l' if islink else 'x' if isexec else ''
        self._copysource = copysource

    def copysource(self):
        """Return the copy source recorded at construction, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2434 2435
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            # default to reusing the original commit message
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]  # don't mutate the caller's list
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged, so delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2529 2530
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so the
        # disk-level fast path is only usable when neither side is a symlink.
        anylink = 'l' in self.flags() or 'l' in fctx.flags()
        if not anylink and isinstance(fctx, workingfilectx) and self._repo:
            # Both sides are disk-backed: compare files directly. Note that
            # filecmp uses the opposite return values (True if same) from
            # our cmp functions (True if different).
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # an arbitrary on-disk file carries no recorded flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # raw bytes straight from disk
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now