context: only bother looking for broken dirstate for 20-byte changeid...
Martin von Zweigbergk
r37872:fdd8da79 default
@@ -1,2541 +1,2540
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirfilenodeids,
26 26 wdirid,
27 27 )
28 28 from . import (
29 29 dagop,
30 30 encoding,
31 31 error,
32 32 fileset,
33 33 match as matchmod,
34 34 obsolete as obsmod,
35 35 patch,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 repoview,
40 40 revlog,
41 41 scmutil,
42 42 sparse,
43 43 subrepo,
44 44 subrepoutil,
45 45 util,
46 46 )
47 47 from .utils import (
48 48 dateutil,
49 49 stringutil,
50 50 )
51 51
52 52 propertycache = util.propertycache
53 53
54 54 nonascii = re.compile(br'[^\x21-\x7f]').search
55 55
56 56 class basectx(object):
57 57 """A basectx object represents the common logic for its children:
58 58 changectx: read-only context that is already present in the repo,
59 59 workingctx: a context that represents the working directory and can
60 60 be committed,
61 61 memctx: a context that represents changes in-memory and can also
62 62 be committed."""
63 63
64 64 def __init__(self, repo):
65 65 self._repo = repo
66 66
67 67 def __bytes__(self):
68 68 return short(self.node())
69 69
70 70 __str__ = encoding.strmethod(__bytes__)
71 71
72 72 def __repr__(self):
73 73 return r"<%s %s>" % (type(self).__name__, str(self))
74 74
75 75 def __eq__(self, other):
76 76 try:
77 77 return type(self) == type(other) and self._rev == other._rev
78 78 except AttributeError:
79 79 return False
80 80
81 81 def __ne__(self, other):
82 82 return not (self == other)
83 83
84 84 def __contains__(self, key):
85 85 return key in self._manifest
86 86
87 87 def __getitem__(self, key):
88 88 return self.filectx(key)
89 89
90 90 def __iter__(self):
91 91 return iter(self._manifest)
92 92
93 93 def _buildstatusmanifest(self, status):
94 94 """Builds a manifest that includes the given status results, if this is
95 95 a working copy context. For non-working copy contexts, it just returns
96 96 the normal manifest."""
97 97 return self.manifest()
98 98
99 99 def _matchstatus(self, other, match):
100 100 """This internal method provides a way for child objects to override the
101 101 match operator.
102 102 """
103 103 return match
104 104
105 105 def _buildstatus(self, other, s, match, listignored, listclean,
106 106 listunknown):
107 107 """build a status with respect to another context"""
108 108 # Load earliest manifest first for caching reasons. More specifically,
109 109 # if you have revisions 1000 and 1001, 1001 is probably stored as a
110 110 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
111 111 # 1000 and cache it so that when you read 1001, we just need to apply a
112 112 # delta to what's in the cache. So that's one full reconstruction + one
113 113 # delta application.
114 114 mf2 = None
115 115 if self.rev() is not None and self.rev() < other.rev():
116 116 mf2 = self._buildstatusmanifest(s)
117 117 mf1 = other._buildstatusmanifest(s)
118 118 if mf2 is None:
119 119 mf2 = self._buildstatusmanifest(s)
120 120
121 121 modified, added = [], []
122 122 removed = []
123 123 clean = []
124 124 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
125 125 deletedset = set(deleted)
126 126 d = mf1.diff(mf2, match=match, clean=listclean)
127 127 for fn, value in d.iteritems():
128 128 if fn in deletedset:
129 129 continue
130 130 if value is None:
131 131 clean.append(fn)
132 132 continue
133 133 (node1, flag1), (node2, flag2) = value
134 134 if node1 is None:
135 135 added.append(fn)
136 136 elif node2 is None:
137 137 removed.append(fn)
138 138 elif flag1 != flag2:
139 139 modified.append(fn)
140 140 elif node2 not in wdirfilenodeids:
141 141 # When comparing files between two commits, we save time by
142 142 # not comparing the file contents when the nodeids differ.
143 143 # Note that this means we incorrectly report a reverted change
144 144 # to a file as a modification.
145 145 modified.append(fn)
146 146 elif self[fn].cmp(other[fn]):
147 147 modified.append(fn)
148 148 else:
149 149 clean.append(fn)
150 150
151 151 if removed:
152 152 # need to filter files if they are already reported as removed
153 153 unknown = [fn for fn in unknown if fn not in mf1 and
154 154 (not match or match(fn))]
155 155 ignored = [fn for fn in ignored if fn not in mf1 and
156 156 (not match or match(fn))]
157 157 # if they're deleted, don't report them as removed
158 158 removed = [fn for fn in removed if fn not in deletedset]
159 159
160 160 return scmutil.status(modified, added, removed, deleted, unknown,
161 161 ignored, clean)
162 162
163 163 @propertycache
164 164 def substate(self):
165 165 return subrepoutil.state(self, self._repo.ui)
166 166
167 167 def subrev(self, subpath):
168 168 return self.substate[subpath][1]
169 169
170 170 def rev(self):
171 171 return self._rev
172 172 def node(self):
173 173 return self._node
174 174 def hex(self):
175 175 return hex(self.node())
176 176 def manifest(self):
177 177 return self._manifest
178 178 def manifestctx(self):
179 179 return self._manifestctx
180 180 def repo(self):
181 181 return self._repo
182 182 def phasestr(self):
183 183 return phases.phasenames[self.phase()]
184 184 def mutable(self):
185 185 return self.phase() > phases.public
186 186
187 187 def getfileset(self, expr):
188 188 return fileset.getfileset(self, expr)
189 189
190 190 def obsolete(self):
191 191 """True if the changeset is obsolete"""
192 192 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 193
194 194 def extinct(self):
195 195 """True if the changeset is extinct"""
196 196 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 197
198 198 def orphan(self):
199 199 """True if the changeset is not obsolete but it's ancestor are"""
200 200 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
201 201
202 202 def phasedivergent(self):
203 203 """True if the changeset try to be a successor of a public changeset
204 204
205 205 Only non-public and non-obsolete changesets may be bumped.
206 206 """
207 207 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
208 208
209 209 def contentdivergent(self):
210 210 """Is a successors of a changeset with multiple possible successors set
211 211
212 212 Only non-public and non-obsolete changesets may be divergent.
213 213 """
214 214 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
215 215
216 216 def isunstable(self):
217 217 """True if the changeset is either unstable, bumped or divergent"""
218 218 return self.orphan() or self.phasedivergent() or self.contentdivergent()
219 219
220 220 def instabilities(self):
221 221 """return the list of instabilities affecting this changeset.
222 222
223 223 Instabilities are returned as strings. possible values are:
224 224 - orphan,
225 225 - phase-divergent,
226 226 - content-divergent.
227 227 """
228 228 instabilities = []
229 229 if self.orphan():
230 230 instabilities.append('orphan')
231 231 if self.phasedivergent():
232 232 instabilities.append('phase-divergent')
233 233 if self.contentdivergent():
234 234 instabilities.append('content-divergent')
235 235 return instabilities
236 236
237 237 def parents(self):
238 238 """return contexts for each parent changeset"""
239 239 return self._parents
240 240
241 241 def p1(self):
242 242 return self._parents[0]
243 243
244 244 def p2(self):
245 245 parents = self._parents
246 246 if len(parents) == 2:
247 247 return parents[1]
248 248 return changectx(self._repo, nullrev)
249 249
250 250 def _fileinfo(self, path):
251 251 if r'_manifest' in self.__dict__:
252 252 try:
253 253 return self._manifest[path], self._manifest.flags(path)
254 254 except KeyError:
255 255 raise error.ManifestLookupError(self._node, path,
256 256 _('not found in manifest'))
257 257 if r'_manifestdelta' in self.__dict__ or path in self.files():
258 258 if path in self._manifestdelta:
259 259 return (self._manifestdelta[path],
260 260 self._manifestdelta.flags(path))
261 261 mfl = self._repo.manifestlog
262 262 try:
263 263 node, flag = mfl[self._changeset.manifest].find(path)
264 264 except KeyError:
265 265 raise error.ManifestLookupError(self._node, path,
266 266 _('not found in manifest'))
267 267
268 268 return node, flag
269 269
270 270 def filenode(self, path):
271 271 return self._fileinfo(path)[0]
272 272
273 273 def flags(self, path):
274 274 try:
275 275 return self._fileinfo(path)[1]
276 276 except error.LookupError:
277 277 return ''
278 278
279 279 def sub(self, path, allowcreate=True):
280 280 '''return a subrepo for the stored revision of path, never wdir()'''
281 281 return subrepo.subrepo(self, path, allowcreate=allowcreate)
282 282
283 283 def nullsub(self, path, pctx):
284 284 return subrepo.nullsubrepo(self, path, pctx)
285 285
286 286 def workingsub(self, path):
287 287 '''return a subrepo for the stored revision, or wdir if this is a wdir
288 288 context.
289 289 '''
290 290 return subrepo.subrepo(self, path, allowwdir=True)
291 291
292 292 def match(self, pats=None, include=None, exclude=None, default='glob',
293 293 listsubrepos=False, badfn=None):
294 294 r = self._repo
295 295 return matchmod.match(r.root, r.getcwd(), pats,
296 296 include, exclude, default,
297 297 auditor=r.nofsauditor, ctx=self,
298 298 listsubrepos=listsubrepos, badfn=badfn)
299 299
300 300 def diff(self, ctx2=None, match=None, **opts):
301 301 """Returns a diff generator for the given contexts and matcher"""
302 302 if ctx2 is None:
303 303 ctx2 = self.p1()
304 304 if ctx2 is not None:
305 305 ctx2 = self._repo[ctx2]
306 306 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
307 307 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
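# Hedged usage sketch (not part of this module; assumes an existing
# localrepository object named `repo`):
#
#   >>> ctx = repo['tip']
#   >>> patchtext = b''.join(ctx.diff())            # against p1 by default
#   >>> patchtext = b''.join(ctx.diff(git=True))    # kwargs become diffopts
#
# The generator yields byte chunks produced by patch.diff(); keyword
# options are forwarded through patch.diffopts().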
308 308
309 309 def dirs(self):
310 310 return self._manifest.dirs()
311 311
312 312 def hasdir(self, dir):
313 313 return self._manifest.hasdir(dir)
314 314
315 315 def status(self, other=None, match=None, listignored=False,
316 316 listclean=False, listunknown=False, listsubrepos=False):
317 317 """return status of files between two nodes or node and working
318 318 directory.
319 319
320 320 If other is None, compare this node with working directory.
321 321
322 322 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 323 """
324 324
325 325 ctx1 = self
326 326 ctx2 = self._repo[other]
327 327
328 328 # This next code block is, admittedly, fragile logic that tests for
329 329 # reversing the contexts and wouldn't need to exist if it weren't for
330 330 # the fast (and common) code path of comparing the working directory
331 331 # with its first parent.
332 332 #
333 333 # What we're aiming for here is the ability to call:
334 334 #
335 335 # workingctx.status(parentctx)
336 336 #
337 337 # If we always built the manifest for each context and compared those,
338 338 # then we'd be done. But the special case of the above call means we
339 339 # just copy the manifest of the parent.
340 340 reversed = False
341 341 if (not isinstance(ctx1, changectx)
342 342 and isinstance(ctx2, changectx)):
343 343 reversed = True
344 344 ctx1, ctx2 = ctx2, ctx1
345 345
346 346 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
347 347 match = ctx2._matchstatus(ctx1, match)
348 348 r = scmutil.status([], [], [], [], [], [], [])
349 349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 350 listunknown)
351 351
352 352 if reversed:
353 353 # Reverse added and removed. Clear deleted, unknown and ignored as
354 354 # these make no sense to reverse.
355 355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 356 r.clean)
357 357
358 358 if listsubrepos:
359 359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 360 try:
361 361 rev2 = ctx2.subrev(subpath)
362 362 except KeyError:
363 363 # A subrepo that existed in node1 was deleted between
364 364 # node1 and node2 (inclusive). Thus, ctx2's substate
365 365 # won't contain that subpath. The best we can do is ignore it.
366 366 rev2 = None
367 367 submatch = matchmod.subdirmatcher(subpath, match)
368 368 s = sub.status(rev2, match=submatch, ignored=listignored,
369 369 clean=listclean, unknown=listunknown,
370 370 listsubrepos=True)
371 371 for rfiles, sfiles in zip(r, s):
372 372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 373
374 374 for l in r:
375 375 l.sort()
376 376
377 377 return r
378 378
379 379 class changectx(basectx):
380 380 """A changecontext object makes access to data related to a particular
381 381 changeset convenient. It represents a read-only context already present in
382 382 the repo."""
383 383 def __init__(self, repo, changeid='.'):
384 384 """changeid is a revision number, node, or tag"""
385 385 super(changectx, self).__init__(repo)
386 386
387 387 try:
388 388 if isinstance(changeid, int):
389 389 self._node = repo.changelog.node(changeid)
390 390 self._rev = changeid
391 391 return
392 392 if changeid == 'null':
393 393 self._node = nullid
394 394 self._rev = nullrev
395 395 return
396 396 if changeid == 'tip':
397 397 self._node = repo.changelog.tip()
398 398 self._rev = repo.changelog.rev(self._node)
399 399 return
400 400 if (changeid == '.'
401 401 or repo.local() and changeid == repo.dirstate.p1()):
402 402 # this is a hack to delay/avoid loading obsmarkers
403 403 # when we know that '.' won't be hidden
404 404 self._node = repo.dirstate.p1()
405 405 self._rev = repo.unfiltered().changelog.rev(self._node)
406 406 return
407 407 if len(changeid) == 20:
408 408 try:
409 409 self._node = changeid
410 410 self._rev = repo.changelog.rev(changeid)
411 411 return
412 412 except error.FilteredLookupError:
413 413 raise
414 414 except LookupError:
415 pass
415 # check if it might have come from damaged dirstate
416 #
417 # XXX we could avoid the unfiltered if we had a recognizable
418 # exception for filtered changeset access
419 if (repo.local()
420 and changeid in repo.unfiltered().dirstate.parents()):
421 msg = _("working directory has unknown parent '%s'!")
422 raise error.Abort(msg % short(changeid))
416 423
417 424 if len(changeid) == 40:
418 425 try:
419 426 self._node = bin(changeid)
420 427 self._rev = repo.changelog.rev(self._node)
421 428 return
422 429 except error.FilteredLookupError:
423 430 raise
424 431 except (TypeError, LookupError):
425 432 pass
426 433
427 434 # lookup failed
428 # check if it might have come from damaged dirstate
429 #
430 # XXX we could avoid the unfiltered if we had a recognizable
431 # exception for filtered changeset access
432 if (repo.local()
433 and changeid in repo.unfiltered().dirstate.parents()):
434 msg = _("working directory has unknown parent '%s'!")
435 raise error.Abort(msg % short(changeid))
436 435 try:
437 436 if len(changeid) == 20 and nonascii(changeid):
438 437 changeid = hex(changeid)
439 438 except TypeError:
440 439 pass
441 440 except (error.FilteredIndexError, error.FilteredLookupError):
442 441 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
443 442 % changeid)
444 443 except error.FilteredRepoLookupError:
445 444 raise
446 445 except IndexError:
447 446 pass
448 447 raise error.RepoLookupError(
449 448 _("unknown revision '%s'") % changeid)
450 449
451 450 def __hash__(self):
452 451 try:
453 452 return hash(self._rev)
454 453 except AttributeError:
455 454 return id(self)
456 455
457 456 def __nonzero__(self):
458 457 return self._rev != nullrev
459 458
460 459 __bool__ = __nonzero__
461 460
462 461 @propertycache
463 462 def _changeset(self):
464 463 return self._repo.changelog.changelogrevision(self.rev())
465 464
466 465 @propertycache
467 466 def _manifest(self):
468 467 return self._manifestctx.read()
469 468
470 469 @property
471 470 def _manifestctx(self):
472 471 return self._repo.manifestlog[self._changeset.manifest]
473 472
474 473 @propertycache
475 474 def _manifestdelta(self):
476 475 return self._manifestctx.readdelta()
477 476
478 477 @propertycache
479 478 def _parents(self):
480 479 repo = self._repo
481 480 p1, p2 = repo.changelog.parentrevs(self._rev)
482 481 if p2 == nullrev:
483 482 return [changectx(repo, p1)]
484 483 return [changectx(repo, p1), changectx(repo, p2)]
485 484
486 485 def changeset(self):
487 486 c = self._changeset
488 487 return (
489 488 c.manifest,
490 489 c.user,
491 490 c.date,
492 491 c.files,
493 492 c.description,
494 493 c.extra,
495 494 )
496 495 def manifestnode(self):
497 496 return self._changeset.manifest
498 497
499 498 def user(self):
500 499 return self._changeset.user
501 500 def date(self):
502 501 return self._changeset.date
503 502 def files(self):
504 503 return self._changeset.files
505 504 def description(self):
506 505 return self._changeset.description
507 506 def branch(self):
508 507 return encoding.tolocal(self._changeset.extra.get("branch"))
509 508 def closesbranch(self):
510 509 return 'close' in self._changeset.extra
511 510 def extra(self):
512 511 """Return a dict of extra information."""
513 512 return self._changeset.extra
514 513 def tags(self):
515 514 """Return a list of byte tag names"""
516 515 return self._repo.nodetags(self._node)
517 516 def bookmarks(self):
518 517 """Return a list of byte bookmark names."""
519 518 return self._repo.nodebookmarks(self._node)
520 519 def phase(self):
521 520 return self._repo._phasecache.phase(self._repo, self._rev)
522 521 def hidden(self):
523 522 return self._rev in repoview.filterrevs(self._repo, 'visible')
524 523
525 524 def isinmemory(self):
526 525 return False
527 526
528 527 def children(self):
529 528 """return list of changectx contexts for each child changeset.
530 529
531 530 This returns only the immediate child changesets. Use descendants() to
532 531 recursively walk children.
533 532 """
534 533 c = self._repo.changelog.children(self._node)
535 534 return [changectx(self._repo, x) for x in c]
536 535
537 536 def ancestors(self):
538 537 for a in self._repo.changelog.ancestors([self._rev]):
539 538 yield changectx(self._repo, a)
540 539
541 540 def descendants(self):
542 541 """Recursively yield all children of the changeset.
543 542
544 543 For just the immediate children, use children()
545 544 """
546 545 for d in self._repo.changelog.descendants([self._rev]):
547 546 yield changectx(self._repo, d)
548 547
549 548 def filectx(self, path, fileid=None, filelog=None):
550 549 """get a file context from this changeset"""
551 550 if fileid is None:
552 551 fileid = self.filenode(path)
553 552 return filectx(self._repo, path, fileid=fileid,
554 553 changectx=self, filelog=filelog)
555 554
556 555 def ancestor(self, c2, warn=False):
557 556 """return the "best" ancestor context of self and c2
558 557
559 558 If there are multiple candidates, it will show a message and check
560 559 merge.preferancestor configuration before falling back to the
561 560 revlog ancestor."""
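# For illustration (hypothetical hashes, not part of this module): with
# several common ancestor heads, the choice can be steered via
#
#   hg merge --config merge.preferancestor=1234567890ab
#
# or persistently in a configuration file:
#
#   [merge]
#   preferancestor = 1234567890ab
#
# Each value is resolved with scmutil.revsymbol() and is used only if it is
# one of the candidate heads computed below.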
562 561 # deal with workingctxs
563 562 n2 = c2._node
564 563 if n2 is None:
565 564 n2 = c2._parents[0]._node
566 565 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
567 566 if not cahs:
568 567 anc = nullid
569 568 elif len(cahs) == 1:
570 569 anc = cahs[0]
571 570 else:
572 571 # experimental config: merge.preferancestor
573 572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
574 573 try:
575 574 ctx = scmutil.revsymbol(self._repo, r)
576 575 except error.RepoLookupError:
577 576 continue
578 577 anc = ctx.node()
579 578 if anc in cahs:
580 579 break
581 580 else:
582 581 anc = self._repo.changelog.ancestor(self._node, n2)
583 582 if warn:
584 583 self._repo.ui.status(
585 584 (_("note: using %s as ancestor of %s and %s\n") %
586 585 (short(anc), short(self._node), short(n2))) +
587 586 ''.join(_(" alternatively, use --config "
588 587 "merge.preferancestor=%s\n") %
589 588 short(n) for n in sorted(cahs) if n != anc))
590 589 return changectx(self._repo, anc)
591 590
592 591 def descendant(self, other):
593 592 """True if other is descendant of this changeset"""
594 593 return self._repo.changelog.descendant(self._rev, other._rev)
595 594
596 595 def walk(self, match):
597 596 '''Generates matching file names.'''
598 597
599 598 # Wrap match.bad method to have message with nodeid
600 599 def bad(fn, msg):
601 600 # The manifest doesn't know about subrepos, so don't complain about
602 601 # paths into valid subrepos.
603 602 if any(fn == s or fn.startswith(s + '/')
604 603 for s in self.substate):
605 604 return
606 605 match.bad(fn, _('no such file in rev %s') % self)
607 606
608 607 m = matchmod.badmatch(match, bad)
609 608 return self._manifest.walk(m)
610 609
611 610 def matches(self, match):
612 611 return self.walk(match)
613 612
614 613 class basefilectx(object):
615 614 """A filecontext object represents the common logic for its children:
616 615 filectx: read-only access to a filerevision that is already present
617 616 in the repo,
618 617 workingfilectx: a filecontext that represents files from the working
619 618 directory,
620 619 memfilectx: a filecontext that represents files in-memory,
621 620 overlayfilectx: duplicate another filecontext with some fields overridden.
622 621 """
623 622 @propertycache
624 623 def _filelog(self):
625 624 return self._repo.file(self._path)
626 625
627 626 @propertycache
628 627 def _changeid(self):
629 628 if r'_changeid' in self.__dict__:
630 629 return self._changeid
631 630 elif r'_changectx' in self.__dict__:
632 631 return self._changectx.rev()
633 632 elif r'_descendantrev' in self.__dict__:
634 633 # this file context was created from a revision with a known
635 634 # descendant, we can (lazily) correct for linkrev aliases
636 635 return self._adjustlinkrev(self._descendantrev)
637 636 else:
638 637 return self._filelog.linkrev(self._filerev)
639 638
640 639 @propertycache
641 640 def _filenode(self):
642 641 if r'_fileid' in self.__dict__:
643 642 return self._filelog.lookup(self._fileid)
644 643 else:
645 644 return self._changectx.filenode(self._path)
646 645
647 646 @propertycache
648 647 def _filerev(self):
649 648 return self._filelog.rev(self._filenode)
650 649
651 650 @propertycache
652 651 def _repopath(self):
653 652 return self._path
654 653
655 654 def __nonzero__(self):
656 655 try:
657 656 self._filenode
658 657 return True
659 658 except error.LookupError:
660 659 # file is missing
661 660 return False
662 661
663 662 __bool__ = __nonzero__
664 663
665 664 def __bytes__(self):
666 665 try:
667 666 return "%s@%s" % (self.path(), self._changectx)
668 667 except error.LookupError:
669 668 return "%s@???" % self.path()
670 669
671 670 __str__ = encoding.strmethod(__bytes__)
672 671
673 672 def __repr__(self):
674 673 return r"<%s %s>" % (type(self).__name__, str(self))
675 674
676 675 def __hash__(self):
677 676 try:
678 677 return hash((self._path, self._filenode))
679 678 except AttributeError:
680 679 return id(self)
681 680
682 681 def __eq__(self, other):
683 682 try:
684 683 return (type(self) == type(other) and self._path == other._path
685 684 and self._filenode == other._filenode)
686 685 except AttributeError:
687 686 return False
688 687
689 688 def __ne__(self, other):
690 689 return not (self == other)
691 690
692 691 def filerev(self):
693 692 return self._filerev
694 693 def filenode(self):
695 694 return self._filenode
696 695 @propertycache
697 696 def _flags(self):
698 697 return self._changectx.flags(self._path)
699 698 def flags(self):
700 699 return self._flags
701 700 def filelog(self):
702 701 return self._filelog
703 702 def rev(self):
704 703 return self._changeid
705 704 def linkrev(self):
706 705 return self._filelog.linkrev(self._filerev)
707 706 def node(self):
708 707 return self._changectx.node()
709 708 def hex(self):
710 709 return self._changectx.hex()
711 710 def user(self):
712 711 return self._changectx.user()
713 712 def date(self):
714 713 return self._changectx.date()
715 714 def files(self):
716 715 return self._changectx.files()
717 716 def description(self):
718 717 return self._changectx.description()
719 718 def branch(self):
720 719 return self._changectx.branch()
721 720 def extra(self):
722 721 return self._changectx.extra()
723 722 def phase(self):
724 723 return self._changectx.phase()
725 724 def phasestr(self):
726 725 return self._changectx.phasestr()
727 726 def obsolete(self):
728 727 return self._changectx.obsolete()
729 728 def instabilities(self):
730 729 return self._changectx.instabilities()
731 730 def manifest(self):
732 731 return self._changectx.manifest()
733 732 def changectx(self):
734 733 return self._changectx
735 734 def renamed(self):
736 735 return self._copied
737 736 def repo(self):
738 737 return self._repo
739 738 def size(self):
740 739 return len(self.data())
741 740
742 741 def path(self):
743 742 return self._path
744 743
745 744 def isbinary(self):
746 745 try:
747 746 return stringutil.binary(self.data())
748 747 except IOError:
749 748 return False
750 749 def isexec(self):
751 750 return 'x' in self.flags()
752 751 def islink(self):
753 752 return 'l' in self.flags()
754 753
755 754 def isabsent(self):
756 755 """whether this filectx represents a file not in self._changectx
757 756
758 757 This is mainly for merge code to detect change/delete conflicts. This is
759 758 expected to be True for all subclasses of basectx."""
760 759 return False
761 760
762 761 _customcmp = False
763 762 def cmp(self, fctx):
764 763 """compare with other file context
765 764
766 765 returns True if different than fctx.
767 766 """
768 767 if fctx._customcmp:
769 768 return fctx.cmp(self)
770 769
771 770 if (fctx._filenode is None
772 771 and (self._repo._encodefilterpats
773 772 # if file data starts with '\1\n', empty metadata block is
774 773 # prepended, which adds 4 bytes to filelog.size().
775 774 or self.size() - 4 == fctx.size())
776 775 or self.size() == fctx.size()):
777 776 return self._filelog.cmp(self._filenode, fctx.data())
778 777
779 778 return True
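# Illustration of the metadata quirk mentioned above (hypothetical content,
# not part of this module): a file whose data begins with b'\x01\n' is stored
# in the filelog with an empty metadata block prepended, which adds 4 bytes
# to filelog.size(); the `self.size() - 4 == fctx.size()` check accounts for
# that before falling back to a full content comparison.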
780 779
781 780 def _adjustlinkrev(self, srcrev, inclusive=False):
782 781 """return the first ancestor of <srcrev> introducing <fnode>
783 782
784 783 If the linkrev of the file revision does not point to an ancestor of
785 784 srcrev, we'll walk down the ancestors until we find one introducing
786 785 this file revision.
787 786
788 787 :srcrev: the changeset revision we search ancestors from
789 788 :inclusive: if true, the src revision will also be checked
790 789 """
791 790 repo = self._repo
792 791 cl = repo.unfiltered().changelog
793 792 mfl = repo.manifestlog
794 793 # fetch the linkrev
795 794 lkr = self.linkrev()
796 795 # hack to reuse ancestor computation when searching for renames
797 796 memberanc = getattr(self, '_ancestrycontext', None)
798 797 iteranc = None
799 798 if srcrev is None:
800 799 # wctx case, used by workingfilectx during mergecopy
801 800 revs = [p.rev() for p in self._repo[None].parents()]
802 801 inclusive = True # we skipped the real (revless) source
803 802 else:
804 803 revs = [srcrev]
805 804 if memberanc is None:
806 805 memberanc = iteranc = cl.ancestors(revs, lkr,
807 806 inclusive=inclusive)
808 807 # check if this linkrev is an ancestor of srcrev
809 808 if lkr not in memberanc:
810 809 if iteranc is None:
811 810 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
812 811 fnode = self._filenode
813 812 path = self._path
814 813 for a in iteranc:
815 814 ac = cl.read(a) # get changeset data (we avoid object creation)
816 815 if path in ac[3]: # checking the 'files' field.
817 816 # The file has been touched, check if the content is
818 817 # similar to the one we search for.
819 818 if fnode == mfl[ac[0]].readfast().get(path):
820 819 return a
821 820 # In theory, we should never get out of that loop without a result.
822 821 # But if the manifest uses a buggy file revision (not a child of the
823 822 # one it replaces) we could. Such a buggy situation will likely
824 823 # result in a crash somewhere else at some point.
825 824 return lkr
826 825
827 826 def introrev(self):
828 827 """return the rev of the changeset which introduced this file revision
829 828
830 829 This method is different from linkrev because it takes into account the
831 830 changeset the filectx was created from. It ensures the returned
832 831 revision is one of its ancestors. This prevents bugs from
833 832 'linkrev-shadowing' when a file revision is used by multiple
834 833 changesets.
835 834 """
836 835 lkr = self.linkrev()
837 836 attrs = vars(self)
838 837 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
839 838 if noctx or self.rev() == lkr:
840 839 return self.linkrev()
841 840 return self._adjustlinkrev(self.rev(), inclusive=True)
842 841
843 842 def introfilectx(self):
844 843 """Return filectx having identical contents, but pointing to the
845 844 changeset revision where this filectx was introduced"""
846 845 introrev = self.introrev()
847 846 if self.rev() == introrev:
848 847 return self
849 848 return self.filectx(self.filenode(), changeid=introrev)
850 849
851 850 def _parentfilectx(self, path, fileid, filelog):
852 851 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
853 852 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
854 853 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
855 854 # If self is associated with a changeset (probably explicitly
856 855 # fed), ensure the created filectx is associated with a
857 856 # changeset that is an ancestor of self.changectx.
858 857 # This lets us later use _adjustlinkrev to get a correct link.
859 858 fctx._descendantrev = self.rev()
860 859 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
861 860 elif r'_descendantrev' in vars(self):
862 861 # Otherwise propagate _descendantrev if we have one associated.
863 862 fctx._descendantrev = self._descendantrev
864 863 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
865 864 return fctx
866 865
867 866 def parents(self):
868 867 _path = self._path
869 868 fl = self._filelog
870 869 parents = self._filelog.parents(self._filenode)
871 870 pl = [(_path, node, fl) for node in parents if node != nullid]
872 871
873 872 r = fl.renamed(self._filenode)
874 873 if r:
875 874 # - In the simple rename case, both parents are nullid, pl is empty.
876 875 # - In case of merge, only one of the parents is nullid and should
877 876 # be replaced with the rename information. This parent is -always-
878 877 # the first one.
879 878 #
880 879 # As nullid parents have always been filtered out in the previous list
881 880 # comprehension, inserting at 0 will always result in replacing the
882 881 # first nullid parent with the rename information.
883 882 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
884 883
885 884 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
886 885
887 886 def p1(self):
888 887 return self.parents()[0]
889 888
890 889 def p2(self):
891 890 p = self.parents()
892 891 if len(p) == 2:
893 892 return p[1]
894 893 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
895 894
896 895 def annotate(self, follow=False, skiprevs=None, diffopts=None):
897 896 """Returns a list of annotateline objects for each line in the file
898 897
899 898 - line.fctx is the filectx of the node where that line was last changed
900 899 - line.lineno is the line number at the first appearance in the managed
901 900 file
902 901 - line.text is the data on that line (including newline character)
903 902 """
904 903 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
905 904
906 905 def parents(f):
907 906 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
908 907 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
909 908 # from the topmost introrev (= srcrev) down to p.linkrev() if it
910 909 # isn't an ancestor of the srcrev.
911 910 f._changeid
912 911 pl = f.parents()
913 912
914 913 # Don't return renamed parents if we aren't following.
915 914 if not follow:
916 915 pl = [p for p in pl if p.path() == f.path()]
917 916
918 917 # renamed filectx won't have a filelog yet, so set it
919 918 # from the cache to save time
920 919 for p in pl:
921 920 if not r'_filelog' in p.__dict__:
922 921 p._filelog = getlog(p.path())
923 922
924 923 return pl
925 924
926 925 # use linkrev to find the first changeset where self appeared
927 926 base = self.introfilectx()
928 927 if getattr(base, '_ancestrycontext', None) is None:
929 928 cl = self._repo.changelog
930 929 if base.rev() is None:
931 930 # wctx is not inclusive, but works because _ancestrycontext
932 931 # is used to test filelog revisions
933 932 ac = cl.ancestors([p.rev() for p in base.parents()],
934 933 inclusive=True)
935 934 else:
936 935 ac = cl.ancestors([base.rev()], inclusive=True)
937 936 base._ancestrycontext = ac
938 937
939 938 return dagop.annotate(base, parents, skiprevs=skiprevs,
940 939 diffopts=diffopts)
941 940
942 941 def ancestors(self, followfirst=False):
943 942 visit = {}
944 943 c = self
945 944 if followfirst:
946 945 cut = 1
947 946 else:
948 947 cut = None
949 948
950 949 while True:
951 950 for parent in c.parents()[:cut]:
952 951 visit[(parent.linkrev(), parent.filenode())] = parent
953 952 if not visit:
954 953 break
955 954 c = visit.pop(max(visit))
956 955 yield c
957 956
958 957 def decodeddata(self):
959 958 """Returns `data()` after running repository decoding filters.
960 959
961 960 This is often equivalent to how the data would be expressed on disk.
962 961 """
963 962 return self._repo.wwritedata(self.path(), self.data())
964 963
965 964 class filectx(basefilectx):
966 965 """A filecontext object makes access to data related to a particular
967 966 filerevision convenient."""
968 967 def __init__(self, repo, path, changeid=None, fileid=None,
969 968 filelog=None, changectx=None):
970 969 """changeid can be a changeset revision, node, or tag.
971 970 fileid can be a file revision or node."""
972 971 self._repo = repo
973 972 self._path = path
974 973
975 974 assert (changeid is not None
976 975 or fileid is not None
977 976 or changectx is not None), \
978 977 ("bad args: changeid=%r, fileid=%r, changectx=%r"
979 978 % (changeid, fileid, changectx))
980 979
981 980 if filelog is not None:
982 981 self._filelog = filelog
983 982
984 983 if changeid is not None:
985 984 self._changeid = changeid
986 985 if changectx is not None:
987 986 self._changectx = changectx
988 987 if fileid is not None:
989 988 self._fileid = fileid
990 989
991 990 @propertycache
992 991 def _changectx(self):
993 992 try:
994 993 return changectx(self._repo, self._changeid)
995 994 except error.FilteredRepoLookupError:
996 995 # Linkrev may point to any revision in the repository. When the
997 996 # repository is filtered this may lead to `filectx` trying to build
998 997 # `changectx` for a filtered revision. In such a case we fall back to
999 998 # creating `changectx` on the unfiltered version of the repository.
1000 999 # This fallback should not be an issue because `changectx` from
1001 1000 # `filectx` are not used in complex operations that care about
1002 1001 # filtering.
1003 1002 #
1004 1003 # This fallback is a cheap and dirty fix that prevents several
1005 1004 # crashes. It does not ensure the behavior is correct. However the
1006 1005 # behavior was not correct before filtering either and "incorrect
1007 1006 # behavior" is seen as better than "crash"
1008 1007 #
1009 1008 # Linkrevs have several serious troubles with filtering that are
1010 1009 # complicated to solve. Proper handling of the issue here should be
1011 1010 # considered once solving the linkrev issues is on the table.
1012 1011 return changectx(self._repo.unfiltered(), self._changeid)
1013 1012
1014 1013 def filectx(self, fileid, changeid=None):
1015 1014 '''opens an arbitrary revision of the file without
1016 1015 opening a new filelog'''
1017 1016 return filectx(self._repo, self._path, fileid=fileid,
1018 1017 filelog=self._filelog, changeid=changeid)
1019 1018
1020 1019 def rawdata(self):
1021 1020 return self._filelog.revision(self._filenode, raw=True)
1022 1021
1023 1022 def rawflags(self):
1024 1023 """low-level revlog flags"""
1025 1024 return self._filelog.flags(self._filerev)
1026 1025
1027 1026 def data(self):
1028 1027 try:
1029 1028 return self._filelog.read(self._filenode)
1030 1029 except error.CensoredNodeError:
1031 1030 if self._repo.ui.config("censor", "policy") == "ignore":
1032 1031 return ""
1033 1032 raise error.Abort(_("censored node: %s") % short(self._filenode),
1034 1033 hint=_("set censor.policy to ignore errors"))
1035 1034
1036 1035 def size(self):
1037 1036 return self._filelog.size(self._filerev)
1038 1037
1039 1038 @propertycache
1040 1039 def _copied(self):
1041 1040 """check if file was actually renamed in this changeset revision
1042 1041
1043 1042 If rename logged in file revision, we report copy for changeset only
1044 1043 if file revisions linkrev points back to the changeset in question
1045 1044 or both changeset parents contain different file revisions.
1046 1045 """
1047 1046
1048 1047 renamed = self._filelog.renamed(self._filenode)
1049 1048 if not renamed:
1050 1049 return renamed
1051 1050
1052 1051 if self.rev() == self.linkrev():
1053 1052 return renamed
1054 1053
1055 1054 name = self.path()
1056 1055 fnode = self._filenode
1057 1056 for p in self._changectx.parents():
1058 1057 try:
1059 1058 if fnode == p.filenode(name):
1060 1059 return None
1061 1060 except error.LookupError:
1062 1061 pass
1063 1062 return renamed
1064 1063
1065 1064 def children(self):
1066 1065 # hard for renames
1067 1066 c = self._filelog.children(self._filenode)
1068 1067 return [filectx(self._repo, self._path, fileid=x,
1069 1068 filelog=self._filelog) for x in c]
1070 1069
1071 1070 class committablectx(basectx):
1072 1071 """A committablectx object provides common functionality for a context that
1073 1072 wants the ability to commit, e.g. workingctx or memctx."""
1074 1073 def __init__(self, repo, text="", user=None, date=None, extra=None,
1075 1074 changes=None):
1076 1075 super(committablectx, self).__init__(repo)
1077 1076 self._rev = None
1078 1077 self._node = None
1079 1078 self._text = text
1080 1079 if date:
1081 1080 self._date = dateutil.parsedate(date)
1082 1081 if user:
1083 1082 self._user = user
1084 1083 if changes:
1085 1084 self._status = changes
1086 1085
1087 1086 self._extra = {}
1088 1087 if extra:
1089 1088 self._extra = extra.copy()
1090 1089 if 'branch' not in self._extra:
1091 1090 try:
1092 1091 branch = encoding.fromlocal(self._repo.dirstate.branch())
1093 1092 except UnicodeDecodeError:
1094 1093 raise error.Abort(_('branch name not in UTF-8!'))
1095 1094 self._extra['branch'] = branch
1096 1095 if self._extra['branch'] == '':
1097 1096 self._extra['branch'] = 'default'
1098 1097
1099 1098 def __bytes__(self):
1100 1099 return bytes(self._parents[0]) + "+"
1101 1100
1102 1101 __str__ = encoding.strmethod(__bytes__)
1103 1102
1104 1103 def __nonzero__(self):
1105 1104 return True
1106 1105
1107 1106 __bool__ = __nonzero__
1108 1107
1109 1108 def _buildflagfunc(self):
1110 1109 # Create a fallback function for getting file flags when the
1111 1110 # filesystem doesn't support them
1112 1111
1113 1112 copiesget = self._repo.dirstate.copies().get
1114 1113 parents = self.parents()
1115 1114 if len(parents) < 2:
1116 1115 # when we have one parent, it's easy: copy from parent
1117 1116 man = parents[0].manifest()
1118 1117 def func(f):
1119 1118 f = copiesget(f, f)
1120 1119 return man.flags(f)
1121 1120 else:
1122 1121 # merges are tricky: we try to reconstruct the unstored
1123 1122 # result from the merge (issue1802)
1124 1123 p1, p2 = parents
1125 1124 pa = p1.ancestor(p2)
1126 1125 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1127 1126
1128 1127 def func(f):
1129 1128 f = copiesget(f, f) # may be wrong for merges with copies
1130 1129 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1131 1130 if fl1 == fl2:
1132 1131 return fl1
1133 1132 if fl1 == fla:
1134 1133 return fl2
1135 1134 if fl2 == fla:
1136 1135 return fl1
1137 1136 return '' # punt for conflicts
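# Worked example (hypothetical flags): if p1 records 'x' for f, p2 records
# '' and their ancestor records '', only p1's side changed the flag, so the
# fl2 == fla branch above keeps 'x'. If both sides changed the flag in
# different ways, none of the branches match and '' is returned so later
# code can resolve the conflict.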
1138 1137
1139 1138 return func
1140 1139
1141 1140 @propertycache
1142 1141 def _flagfunc(self):
1143 1142 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1144 1143
1145 1144 @propertycache
1146 1145 def _status(self):
1147 1146 return self._repo.status()
1148 1147
1149 1148 @propertycache
1150 1149 def _user(self):
1151 1150 return self._repo.ui.username()
1152 1151
1153 1152 @propertycache
1154 1153 def _date(self):
1155 1154 ui = self._repo.ui
1156 1155 date = ui.configdate('devel', 'default-date')
1157 1156 if date is None:
1158 1157 date = dateutil.makedate()
1159 1158 return date
1160 1159
1161 1160 def subrev(self, subpath):
1162 1161 return None
1163 1162
1164 1163 def manifestnode(self):
1165 1164 return None
1166 1165 def user(self):
1167 1166 return self._user or self._repo.ui.username()
1168 1167 def date(self):
1169 1168 return self._date
1170 1169 def description(self):
1171 1170 return self._text
1172 1171 def files(self):
1173 1172 return sorted(self._status.modified + self._status.added +
1174 1173 self._status.removed)
1175 1174
1176 1175 def modified(self):
1177 1176 return self._status.modified
1178 1177 def added(self):
1179 1178 return self._status.added
1180 1179 def removed(self):
1181 1180 return self._status.removed
1182 1181 def deleted(self):
1183 1182 return self._status.deleted
1184 1183 def branch(self):
1185 1184 return encoding.tolocal(self._extra['branch'])
1186 1185 def closesbranch(self):
1187 1186 return 'close' in self._extra
1188 1187 def extra(self):
1189 1188 return self._extra
1190 1189
1191 1190 def isinmemory(self):
1192 1191 return False
1193 1192
1194 1193 def tags(self):
1195 1194 return []
1196 1195
1197 1196 def bookmarks(self):
1198 1197 b = []
1199 1198 for p in self.parents():
1200 1199 b.extend(p.bookmarks())
1201 1200 return b
1202 1201
1203 1202 def phase(self):
1204 1203 phase = phases.draft # default phase to draft
1205 1204 for p in self.parents():
1206 1205 phase = max(phase, p.phase())
1207 1206 return phase
1208 1207
1209 1208 def hidden(self):
1210 1209 return False
1211 1210
1212 1211 def children(self):
1213 1212 return []
1214 1213
1215 1214 def flags(self, path):
1216 1215 if r'_manifest' in self.__dict__:
1217 1216 try:
1218 1217 return self._manifest.flags(path)
1219 1218 except KeyError:
1220 1219 return ''
1221 1220
1222 1221 try:
1223 1222 return self._flagfunc(path)
1224 1223 except OSError:
1225 1224 return ''
1226 1225
1227 1226 def ancestor(self, c2):
1228 1227 """return the "best" ancestor context of self and c2"""
1229 1228 return self._parents[0].ancestor(c2) # punt on two parents for now
1230 1229
1231 1230 def walk(self, match):
1232 1231 '''Generates matching file names.'''
1233 1232 return sorted(self._repo.dirstate.walk(match,
1234 1233 subrepos=sorted(self.substate),
1235 1234 unknown=True, ignored=False))
1236 1235
1237 1236 def matches(self, match):
1238 1237 return sorted(self._repo.dirstate.matches(match))
1239 1238
1240 1239 def ancestors(self):
1241 1240 for p in self._parents:
1242 1241 yield p
1243 1242 for a in self._repo.changelog.ancestors(
1244 1243 [p.rev() for p in self._parents]):
1245 1244 yield changectx(self._repo, a)
1246 1245
1247 1246 def markcommitted(self, node):
1248 1247 """Perform post-commit cleanup necessary after committing this ctx
1249 1248
1250 1249 Specifically, this updates backing stores this working context
1251 1250 wraps to reflect the fact that the changes reflected by this
1252 1251 workingctx have been committed. For example, it marks
1253 1252 modified and added files as normal in the dirstate.
1254 1253
1255 1254 """
1256 1255
1257 1256 with self._repo.dirstate.parentchange():
1258 1257 for f in self.modified() + self.added():
1259 1258 self._repo.dirstate.normal(f)
1260 1259 for f in self.removed():
1261 1260 self._repo.dirstate.drop(f)
1262 1261 self._repo.dirstate.setparents(node)
1263 1262
1264 1263 # write changes out explicitly, because nesting wlock at
1265 1264 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1266 1265 # from immediately doing so for subsequent changing files
1267 1266 self._repo.dirstate.write(self._repo.currenttransaction())
1268 1267
1269 1268 def dirty(self, missing=False, merge=True, branch=True):
1270 1269 return False
1271 1270
1272 1271 class workingctx(committablectx):
1273 1272 """A workingctx object makes access to data related to
1274 1273 the current working directory convenient.
1275 1274 date - any valid date string or (unixtime, offset), or None.
1276 1275 user - username string, or None.
1277 1276 extra - a dictionary of extra values, or None.
1278 1277 changes - a list of file lists as returned by localrepo.status()
1279 1278 or None to use the repository status.
1280 1279 """
1281 1280 def __init__(self, repo, text="", user=None, date=None, extra=None,
1282 1281 changes=None):
1283 1282 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1284 1283
1285 1284 def __iter__(self):
1286 1285 d = self._repo.dirstate
1287 1286 for f in d:
1288 1287 if d[f] != 'r':
1289 1288 yield f
1290 1289
1291 1290 def __contains__(self, key):
1292 1291 return self._repo.dirstate[key] not in "?r"
1293 1292
1294 1293 def hex(self):
1295 1294 return hex(wdirid)
1296 1295
1297 1296 @propertycache
1298 1297 def _parents(self):
1299 1298 p = self._repo.dirstate.parents()
1300 1299 if p[1] == nullid:
1301 1300 p = p[:-1]
1302 1301 return [changectx(self._repo, x) for x in p]
1303 1302
1304 1303 def _fileinfo(self, path):
1305 1304 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1306 1305 self._manifest
1307 1306 return super(workingctx, self)._fileinfo(path)
1308 1307
1309 1308 def filectx(self, path, filelog=None):
1310 1309 """get a file context from the working directory"""
1311 1310 return workingfilectx(self._repo, path, workingctx=self,
1312 1311 filelog=filelog)
1313 1312
1314 1313 def dirty(self, missing=False, merge=True, branch=True):
1315 1314 "check whether a working directory is modified"
1316 1315 # check subrepos first
1317 1316 for s in sorted(self.substate):
1318 1317 if self.sub(s).dirty(missing=missing):
1319 1318 return True
1320 1319 # check current working dir
1321 1320 return ((merge and self.p2()) or
1322 1321 (branch and self.branch() != self.p1().branch()) or
1323 1322 self.modified() or self.added() or self.removed() or
1324 1323 (missing and self.deleted()))
1325 1324
1326 1325 def add(self, list, prefix=""):
1327 1326 with self._repo.wlock():
1328 1327 ui, ds = self._repo.ui, self._repo.dirstate
1329 1328 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1330 1329 rejected = []
1331 1330 lstat = self._repo.wvfs.lstat
1332 1331 for f in list:
1333 1332 # ds.pathto() returns an absolute file when this is invoked from
1334 1333 # the keyword extension. That gets flagged as non-portable on
1335 1334 # Windows, since it contains the drive letter and colon.
1336 1335 scmutil.checkportable(ui, os.path.join(prefix, f))
1337 1336 try:
1338 1337 st = lstat(f)
1339 1338 except OSError:
1340 1339 ui.warn(_("%s does not exist!\n") % uipath(f))
1341 1340 rejected.append(f)
1342 1341 continue
1343 1342 if st.st_size > 10000000:
1344 1343 ui.warn(_("%s: up to %d MB of RAM may be required "
1345 1344 "to manage this file\n"
1346 1345 "(use 'hg revert %s' to cancel the "
1347 1346 "pending addition)\n")
1348 1347 % (f, 3 * st.st_size // 1000000, uipath(f)))
1349 1348 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1350 1349 ui.warn(_("%s not added: only files and symlinks "
1351 1350 "supported currently\n") % uipath(f))
1352 1351 rejected.append(f)
1353 1352 elif ds[f] in 'amn':
1354 1353 ui.warn(_("%s already tracked!\n") % uipath(f))
1355 1354 elif ds[f] == 'r':
1356 1355 ds.normallookup(f)
1357 1356 else:
1358 1357 ds.add(f)
1359 1358 return rejected
1360 1359
1361 1360 def forget(self, files, prefix=""):
1362 1361 with self._repo.wlock():
1363 1362 ds = self._repo.dirstate
1364 1363 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1365 1364 rejected = []
1366 1365 for f in files:
1367 1366 if f not in self._repo.dirstate:
1368 1367 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1369 1368 rejected.append(f)
1370 1369 elif self._repo.dirstate[f] != 'a':
1371 1370 self._repo.dirstate.remove(f)
1372 1371 else:
1373 1372 self._repo.dirstate.drop(f)
1374 1373 return rejected
1375 1374
1376 1375 def undelete(self, list):
1377 1376 pctxs = self.parents()
1378 1377 with self._repo.wlock():
1379 1378 ds = self._repo.dirstate
1380 1379 for f in list:
1381 1380 if self._repo.dirstate[f] != 'r':
1382 1381 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1383 1382 else:
1384 1383 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1385 1384 t = fctx.data()
1386 1385 self._repo.wwrite(f, t, fctx.flags())
1387 1386 self._repo.dirstate.normal(f)
1388 1387
1389 1388 def copy(self, source, dest):
1390 1389 try:
1391 1390 st = self._repo.wvfs.lstat(dest)
1392 1391 except OSError as err:
1393 1392 if err.errno != errno.ENOENT:
1394 1393 raise
1395 1394 self._repo.ui.warn(_("%s does not exist!\n")
1396 1395 % self._repo.dirstate.pathto(dest))
1397 1396 return
1398 1397 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1399 1398 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1400 1399 "symbolic link\n")
1401 1400 % self._repo.dirstate.pathto(dest))
1402 1401 else:
1403 1402 with self._repo.wlock():
1404 1403 if self._repo.dirstate[dest] in '?':
1405 1404 self._repo.dirstate.add(dest)
1406 1405 elif self._repo.dirstate[dest] in 'r':
1407 1406 self._repo.dirstate.normallookup(dest)
1408 1407 self._repo.dirstate.copy(source, dest)
1409 1408
1410 1409 def match(self, pats=None, include=None, exclude=None, default='glob',
1411 1410 listsubrepos=False, badfn=None):
1412 1411 r = self._repo
1413 1412
1414 1413 # Only a case insensitive filesystem needs magic to translate user input
1415 1414 # to actual case in the filesystem.
1416 1415 icasefs = not util.fscasesensitive(r.root)
1417 1416 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1418 1417 default, auditor=r.auditor, ctx=self,
1419 1418 listsubrepos=listsubrepos, badfn=badfn,
1420 1419 icasefs=icasefs)
1421 1420
1422 1421 def _filtersuspectsymlink(self, files):
1423 1422 if not files or self._repo.dirstate._checklink:
1424 1423 return files
1425 1424
1426 1425 # Symlink placeholders may get non-symlink-like contents
1427 1426 # via user error or dereferencing by NFS or Samba servers,
1428 1427 # so we filter out any placeholders that don't look like a
1429 1428 # symlink
1430 1429 sane = []
1431 1430 for f in files:
1432 1431 if self.flags(f) == 'l':
1433 1432 d = self[f].data()
1434 1433 if (d == '' or len(d) >= 1024 or '\n' in d
1435 1434 or stringutil.binary(d)):
1436 1435 self._repo.ui.debug('ignoring suspect symlink placeholder'
1437 1436 ' "%s"\n' % f)
1438 1437 continue
1439 1438 sane.append(f)
1440 1439 return sane
1441 1440
1442 1441 def _checklookup(self, files):
1443 1442 # check for any possibly clean files
1444 1443 if not files:
1445 1444 return [], [], []
1446 1445
1447 1446 modified = []
1448 1447 deleted = []
1449 1448 fixup = []
1450 1449 pctx = self._parents[0]
1451 1450 # do a full compare of any files that might have changed
1452 1451 for f in sorted(files):
1453 1452 try:
1454 1453 # This will return True for a file that got replaced by a
1455 1454 # directory in the interim, but fixing that is pretty hard.
1456 1455 if (f not in pctx or self.flags(f) != pctx.flags(f)
1457 1456 or pctx[f].cmp(self[f])):
1458 1457 modified.append(f)
1459 1458 else:
1460 1459 fixup.append(f)
1461 1460 except (IOError, OSError):
1462 1461 # A file became inaccessible in between? Mark it as deleted,
1463 1462 # matching dirstate behavior (issue5584).
1464 1463 # The dirstate has more complex behavior around whether a
1465 1464 # missing file matches a directory, etc, but we don't need to
1466 1465 # bother with that: if f has made it to this point, we're sure
1467 1466 # it's in the dirstate.
1468 1467 deleted.append(f)
1469 1468
1470 1469 return modified, deleted, fixup
1471 1470
1472 1471 def _poststatusfixup(self, status, fixup):
1473 1472 """update dirstate for files that are actually clean"""
1474 1473 poststatus = self._repo.postdsstatus()
1475 1474 if fixup or poststatus:
1476 1475 try:
1477 1476 oldid = self._repo.dirstate.identity()
1478 1477
1479 1478 # updating the dirstate is optional
1480 1479 # so we don't wait on the lock
1481 1480 # wlock can invalidate the dirstate, so cache normal _after_
1482 1481 # taking the lock
1483 1482 with self._repo.wlock(False):
1484 1483 if self._repo.dirstate.identity() == oldid:
1485 1484 if fixup:
1486 1485 normal = self._repo.dirstate.normal
1487 1486 for f in fixup:
1488 1487 normal(f)
1489 1488 # write changes out explicitly, because nesting
1490 1489 # wlock at runtime may prevent 'wlock.release()'
1491 1490 # after this block from doing so for subsequent
1492 1491 # changing files
1493 1492 tr = self._repo.currenttransaction()
1494 1493 self._repo.dirstate.write(tr)
1495 1494
1496 1495 if poststatus:
1497 1496 for ps in poststatus:
1498 1497 ps(self, status)
1499 1498 else:
1500 1499 # in this case, writing changes out breaks
1501 1500 # consistency, because .hg/dirstate was
1502 1501 # already changed simultaneously after last
1503 1502 # caching (see also issue5584 for detail)
1504 1503 self._repo.ui.debug('skip updating dirstate: '
1505 1504 'identity mismatch\n')
1506 1505 except error.LockError:
1507 1506 pass
1508 1507 finally:
1509 1508 # Even if the wlock couldn't be grabbed, clear out the list.
1510 1509 self._repo.clearpostdsstatus()
1511 1510
1512 1511 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1513 1512 '''Gets the status from the dirstate -- internal use only.'''
1514 1513 subrepos = []
1515 1514 if '.hgsub' in self:
1516 1515 subrepos = sorted(self.substate)
1517 1516 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1518 1517 clean=clean, unknown=unknown)
1519 1518
1520 1519 # check for any possibly clean files
1521 1520 fixup = []
1522 1521 if cmp:
1523 1522 modified2, deleted2, fixup = self._checklookup(cmp)
1524 1523 s.modified.extend(modified2)
1525 1524 s.deleted.extend(deleted2)
1526 1525
1527 1526 if fixup and clean:
1528 1527 s.clean.extend(fixup)
1529 1528
1530 1529 self._poststatusfixup(s, fixup)
1531 1530
1532 1531 if match.always():
1533 1532 # cache for performance
1534 1533 if s.unknown or s.ignored or s.clean:
1535 1534 # "_status" is cached with list*=False in the normal route
1536 1535 self._status = scmutil.status(s.modified, s.added, s.removed,
1537 1536 s.deleted, [], [], [])
1538 1537 else:
1539 1538 self._status = s
1540 1539
1541 1540 return s
1542 1541
1543 1542 @propertycache
1544 1543 def _manifest(self):
1545 1544 """generate a manifest corresponding to the values in self._status
1546 1545
1547 1546 This reuses the file nodeids from the parent, but we use special node
1548 1547 identifiers for added and modified files. This is used by the manifest
1549 1548 merge to see that files are different and by the update logic to avoid
1550 1549 deleting newly added files.
1551 1550 """
1552 1551 return self._buildstatusmanifest(self._status)
1553 1552
1554 1553 def _buildstatusmanifest(self, status):
1555 1554 """Builds a manifest that includes the given status results."""
1556 1555 parents = self.parents()
1557 1556
1558 1557 man = parents[0].manifest().copy()
1559 1558
1560 1559 ff = self._flagfunc
1561 1560 for i, l in ((addednodeid, status.added),
1562 1561 (modifiednodeid, status.modified)):
1563 1562 for f in l:
1564 1563 man[f] = i
1565 1564 try:
1566 1565 man.setflag(f, ff(f))
1567 1566 except OSError:
1568 1567 pass
1569 1568
1570 1569 for f in status.deleted + status.removed:
1571 1570 if f in man:
1572 1571 del man[f]
1573 1572
1574 1573 return man
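# Hedged illustration (not part of this module): a file reported as added
# gets the addednodeid sentinel and a modified file gets modifiednodeid, so
# any manifest diff against a parent sees their nodeids as different, while
# deleted and removed files simply disappear from the copied manifest.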
1575 1574
1576 1575 def _buildstatus(self, other, s, match, listignored, listclean,
1577 1576 listunknown):
1578 1577 """build a status with respect to another context
1579 1578
1580 1579 This includes logic for maintaining the fast path of status when
1581 1580 comparing the working directory against its parent: building a new
1582 1581 manifest is skipped when self (the working directory) is compared
1583 1582 against its parent (repo['.']).
1584 1583 """
1585 1584 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1586 1585 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1587 1586 # might have accidentally ended up with the entire contents of the file
1588 1587 # they are supposed to be linking to.
1589 1588 s.modified[:] = self._filtersuspectsymlink(s.modified)
1590 1589 if other != self._repo['.']:
1591 1590 s = super(workingctx, self)._buildstatus(other, s, match,
1592 1591 listignored, listclean,
1593 1592 listunknown)
1594 1593 return s
1595 1594
1596 1595 def _matchstatus(self, other, match):
1597 1596 """override the match method with a filter for directory patterns
1598 1597
1599 1598 We use inheritance to customize the match.bad method only in cases of
1600 1599 workingctx since it belongs only to the working directory when
1601 1600 comparing against the parent changeset.
1602 1601
1603 1602 If we aren't comparing against the working directory's parent, then we
1604 1603 just use the default match object sent to us.
1605 1604 """
1606 1605 if other != self._repo['.']:
1607 1606 def bad(f, msg):
1608 1607 # 'f' may be a directory pattern from 'match.files()',
1609 1608 # so 'f not in ctx1' is not enough
1610 1609 if f not in other and not other.hasdir(f):
1611 1610 self._repo.ui.warn('%s: %s\n' %
1612 1611 (self._repo.dirstate.pathto(f), msg))
1613 1612 match.bad = bad
1614 1613 return match
1615 1614
1616 1615 def markcommitted(self, node):
1617 1616 super(workingctx, self).markcommitted(node)
1618 1617
1619 1618 sparse.aftercommit(self._repo, node)
1620 1619
1621 1620 class committablefilectx(basefilectx):
1622 1621 """A committablefilectx provides common functionality for a file context
1623 1622 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1624 1623 def __init__(self, repo, path, filelog=None, ctx=None):
1625 1624 self._repo = repo
1626 1625 self._path = path
1627 1626 self._changeid = None
1628 1627 self._filerev = self._filenode = None
1629 1628
1630 1629 if filelog is not None:
1631 1630 self._filelog = filelog
1632 1631 if ctx:
1633 1632 self._changectx = ctx
1634 1633
1635 1634 def __nonzero__(self):
1636 1635 return True
1637 1636
1638 1637 __bool__ = __nonzero__
1639 1638
1640 1639 def linkrev(self):
1641 1640 # linked to self._changectx whether the file is modified or not
1642 1641 return self.rev()
1643 1642
1644 1643 def parents(self):
1645 1644 '''return parent filectxs, following copies if necessary'''
1646 1645 def filenode(ctx, path):
1647 1646 return ctx._manifest.get(path, nullid)
1648 1647
1649 1648 path = self._path
1650 1649 fl = self._filelog
1651 1650 pcl = self._changectx._parents
1652 1651 renamed = self.renamed()
1653 1652
1654 1653 if renamed:
1655 1654 pl = [renamed + (None,)]
1656 1655 else:
1657 1656 pl = [(path, filenode(pcl[0], path), fl)]
1658 1657
1659 1658 for pc in pcl[1:]:
1660 1659 pl.append((path, filenode(pc, path), fl))
1661 1660
1662 1661 return [self._parentfilectx(p, fileid=n, filelog=l)
1663 1662 for p, n, l in pl if n != nullid]
1664 1663
1665 1664 def children(self):
1666 1665 return []
1667 1666
1668 1667 class workingfilectx(committablefilectx):
1669 1668 """A workingfilectx object makes access to data related to a particular
1670 1669 file in the working directory convenient."""
1671 1670 def __init__(self, repo, path, filelog=None, workingctx=None):
1672 1671 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1673 1672
1674 1673 @propertycache
1675 1674 def _changectx(self):
1676 1675 return workingctx(self._repo)
1677 1676
1678 1677 def data(self):
1679 1678 return self._repo.wread(self._path)
1680 1679 def renamed(self):
1681 1680 rp = self._repo.dirstate.copied(self._path)
1682 1681 if not rp:
1683 1682 return None
1684 1683 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1685 1684
1686 1685 def size(self):
1687 1686 return self._repo.wvfs.lstat(self._path).st_size
1688 1687 def date(self):
1689 1688 t, tz = self._changectx.date()
1690 1689 try:
1691 1690 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1692 1691 except OSError as err:
1693 1692 if err.errno != errno.ENOENT:
1694 1693 raise
1695 1694 return (t, tz)
1696 1695
1697 1696 def exists(self):
1698 1697 return self._repo.wvfs.exists(self._path)
1699 1698
1700 1699 def lexists(self):
1701 1700 return self._repo.wvfs.lexists(self._path)
1702 1701
1703 1702 def audit(self):
1704 1703 return self._repo.wvfs.audit(self._path)
1705 1704
1706 1705 def cmp(self, fctx):
1707 1706 """compare with other file context
1708 1707
1709 1708 returns True if different than fctx.
1710 1709 """
1711 1710 # fctx should be a filectx (not a workingfilectx)
1712 1711 # invert comparison to reuse the same code path
1713 1712 return fctx.cmp(self)
1714 1713
1715 1714 def remove(self, ignoremissing=False):
1716 1715 """wraps unlink for a repo's working directory"""
1717 1716 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1718 1717
1719 1718 def write(self, data, flags, backgroundclose=False, **kwargs):
1720 1719 """wraps repo.wwrite"""
1721 1720 self._repo.wwrite(self._path, data, flags,
1722 1721 backgroundclose=backgroundclose,
1723 1722 **kwargs)
1724 1723
1725 1724 def markcopied(self, src):
1726 1725 """marks this file a copy of `src`"""
1727 1726 if self._repo.dirstate[self._path] in "nma":
1728 1727 self._repo.dirstate.copy(src, self._path)
1729 1728
1730 1729 def clearunknown(self):
1731 1730 """Removes conflicting items in the working directory so that
1732 1731 ``write()`` can be called successfully.
1733 1732 """
1734 1733 wvfs = self._repo.wvfs
1735 1734 f = self._path
1736 1735 wvfs.audit(f)
1737 1736 if wvfs.isdir(f) and not wvfs.islink(f):
1738 1737 wvfs.rmtree(f, forcibly=True)
1739 1738 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1740 1739 for p in reversed(list(util.finddirs(f))):
1741 1740 if wvfs.isfileorlink(p):
1742 1741 wvfs.unlink(p)
1743 1742 break
1744 1743
1745 1744 def setflags(self, l, x):
1746 1745 self._repo.wvfs.setflags(self._path, l, x)
1747 1746
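# Illustrative sketch (not part of the original module): callers usually reach
# a workingfilectx through the working directory context rather than by
# constructing one directly; 'somefile' is a hypothetical tracked path.
#
#     wctx = repo[None]           # workingctx for the working directory
#     fctx = wctx['somefile']     # workingfilectx via basectx.__getitem__
#     if fctx.lexists():
#         data = fctx.data()      # reads the on-disk file via repo.wread()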
1748 1747 class overlayworkingctx(committablectx):
1749 1748 """Wraps another mutable context with a write-back cache that can be
1750 1749 converted into a commit context.
1751 1750
1752 1751 self._cache[path] maps to a dict with keys: {
1753 1752 'exists': bool?
1754 1753 'date': date?
1755 1754 'data': str?
1756 1755 'flags': str?
1757 1756 'copied': str? (path or None)
1758 1757 }
1759 1758 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1760 1759 is `False`, the file was deleted.
1761 1760 """
1762 1761
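# Illustrative example (not part of the original docstring): after
# write('foo', b'new content'), the cache entry created by _markdirty()
# below looks like:
#
#     self._cache['foo'] = {
#         'exists': True,
#         'data': b'new content',
#         'date': dateutil.makedate(),
#         'flags': '',
#         'copied': None,
#     }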
1763 1762 def __init__(self, repo):
1764 1763 super(overlayworkingctx, self).__init__(repo)
1765 1764 self.clean()
1766 1765
1767 1766 def setbase(self, wrappedctx):
1768 1767 self._wrappedctx = wrappedctx
1769 1768 self._parents = [wrappedctx]
1770 1769 # Drop old manifest cache as it is now out of date.
1771 1770 # This is necessary when, e.g., rebasing several nodes with one
1772 1771 # ``overlayworkingctx`` (e.g. with --collapse).
1773 1772 util.clearcachedproperty(self, '_manifest')
1774 1773
1775 1774 def data(self, path):
1776 1775 if self.isdirty(path):
1777 1776 if self._cache[path]['exists']:
1778 1777 if self._cache[path]['data']:
1779 1778 return self._cache[path]['data']
1780 1779 else:
1781 1780 # Must fall back here, too, because we only set flags.
1782 1781 return self._wrappedctx[path].data()
1783 1782 else:
1784 1783 raise error.ProgrammingError("No such file or directory: %s" %
1785 1784 path)
1786 1785 else:
1787 1786 return self._wrappedctx[path].data()
1788 1787
1789 1788 @propertycache
1790 1789 def _manifest(self):
1791 1790 parents = self.parents()
1792 1791 man = parents[0].manifest().copy()
1793 1792
1794 1793 flag = self._flagfunc
1795 1794 for path in self.added():
1796 1795 man[path] = addednodeid
1797 1796 man.setflag(path, flag(path))
1798 1797 for path in self.modified():
1799 1798 man[path] = modifiednodeid
1800 1799 man.setflag(path, flag(path))
1801 1800 for path in self.removed():
1802 1801 del man[path]
1803 1802 return man
1804 1803
1805 1804 @propertycache
1806 1805 def _flagfunc(self):
1807 1806 def f(path):
1808 1807 return self._cache[path]['flags']
1809 1808 return f
1810 1809
1811 1810 def files(self):
1812 1811 return sorted(self.added() + self.modified() + self.removed())
1813 1812
1814 1813 def modified(self):
1815 1814 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1816 1815 self._existsinparent(f)]
1817 1816
1818 1817 def added(self):
1819 1818 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1820 1819 not self._existsinparent(f)]
1821 1820
1822 1821 def removed(self):
1823 1822 return [f for f in self._cache.keys() if
1824 1823 not self._cache[f]['exists'] and self._existsinparent(f)]
1825 1824
1826 1825 def isinmemory(self):
1827 1826 return True
1828 1827
1829 1828 def filedate(self, path):
1830 1829 if self.isdirty(path):
1831 1830 return self._cache[path]['date']
1832 1831 else:
1833 1832 return self._wrappedctx[path].date()
1834 1833
1835 1834 def markcopied(self, path, origin):
1836 1835 if self.isdirty(path):
1837 1836 self._cache[path]['copied'] = origin
1838 1837 else:
1839 1838 raise error.ProgrammingError('markcopied() called on clean context')
1840 1839
1841 1840 def copydata(self, path):
1842 1841 if self.isdirty(path):
1843 1842 return self._cache[path]['copied']
1844 1843 else:
1845 1844 raise error.ProgrammingError('copydata() called on clean context')
1846 1845
1847 1846 def flags(self, path):
1848 1847 if self.isdirty(path):
1849 1848 if self._cache[path]['exists']:
1850 1849 return self._cache[path]['flags']
1851 1850 else:
1852 1851 raise error.ProgrammingError("No such file or directory: %s" %
1853 1852 self._path)
1854 1853 else:
1855 1854 return self._wrappedctx[path].flags()
1856 1855
1857 1856 def _existsinparent(self, path):
1858 1857 try:
1859 1858 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1860 1859 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1861 1860 # with an ``exists()`` function.
1862 1861 self._wrappedctx[path]
1863 1862 return True
1864 1863 except error.ManifestLookupError:
1865 1864 return False
1866 1865
1867 1866 def _auditconflicts(self, path):
1868 1867 """Replicates conflict checks done by wvfs.write().
1869 1868
1870 1869 Since we never write to the filesystem and never call `applyupdates` in
1871 1870 IMM, we'll never check that a path is actually writable -- e.g., that a
1872 1871 commit adds `a/foo` while `a` is actually a file in the other commit.
1873 1872 """
1874 1873 def fail(path, component):
1875 1874 # p1() is the base and we're receiving "writes" for p2()'s
1876 1875 # files.
1877 1876 if 'l' in self.p1()[component].flags():
1878 1877 raise error.Abort("error: %s conflicts with symlink %s "
1879 1878 "in %s." % (path, component,
1880 1879 self.p1().rev()))
1881 1880 else:
1882 1881 raise error.Abort("error: '%s' conflicts with file '%s' in "
1883 1882 "%s." % (path, component,
1884 1883 self.p1().rev()))
1885 1884
1886 1885 # Test that each new directory to be created to write this path from p2
1887 1886 # is not a file in p1.
1888 1887 components = path.split('/')
1889 1888 for i in xrange(len(components)):
1890 1889 component = "/".join(components[0:i])
1891 1890 if component in self.p1():
1892 1891 fail(path, component)
1893 1892
1894 1893 # Test the other direction -- that this path from p2 isn't a directory
1895 1894 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1896 1895 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1897 1896 matches = self.p1().manifest().matches(match)
1898 1897 if len(matches) > 0:
1899 1898 if len(matches) == 1 and matches.keys()[0] == path:
1900 1899 return
1901 1900 raise error.Abort("error: file '%s' cannot be written because "
1902 1901 "'%s/' is a folder in %s (containing %d "
1903 1902 "entries: %s)"
1904 1903 % (path, path, self.p1(), len(matches),
1905 1904 ', '.join(matches.keys())))
1906 1905
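# Illustrative example (not part of the original module) of the two checks
# performed by _auditconflicts() above: writing 'a/foo' aborts if 'a' is a
# file in p1 (the loop over path components), and writing 'a' aborts if p1
# tracks anything under 'a/' (the manifest matches() check), unless the only
# match is 'a' itself.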
1907 1906 def write(self, path, data, flags='', **kwargs):
1908 1907 if data is None:
1909 1908 raise error.ProgrammingError("data must be non-None")
1910 1909 self._auditconflicts(path)
1911 1910 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1912 1911 flags=flags)
1913 1912
1914 1913 def setflags(self, path, l, x):
1915 1914 self._markdirty(path, exists=True, date=dateutil.makedate(),
1916 1915 flags=(l and 'l' or '') + (x and 'x' or ''))
1917 1916
1918 1917 def remove(self, path):
1919 1918 self._markdirty(path, exists=False)
1920 1919
1921 1920 def exists(self, path):
1922 1921 """exists behaves like `lexists`, but needs to follow symlinks and
1923 1922 return False if they are broken.
1924 1923 """
1925 1924 if self.isdirty(path):
1926 1925 # If this path exists and is a symlink, "follow" it by calling
1927 1926 # exists on the destination path.
1928 1927 if (self._cache[path]['exists'] and
1929 1928 'l' in self._cache[path]['flags']):
1930 1929 return self.exists(self._cache[path]['data'].strip())
1931 1930 else:
1932 1931 return self._cache[path]['exists']
1933 1932
1934 1933 return self._existsinparent(path)
1935 1934
1936 1935 def lexists(self, path):
1937 1936 """lexists returns True if the path exists"""
1938 1937 if self.isdirty(path):
1939 1938 return self._cache[path]['exists']
1940 1939
1941 1940 return self._existsinparent(path)
1942 1941
1943 1942 def size(self, path):
1944 1943 if self.isdirty(path):
1945 1944 if self._cache[path]['exists']:
1946 1945 return len(self._cache[path]['data'])
1947 1946 else:
1948 1947 raise error.ProgrammingError("No such file or directory: %s" %
1949 1948 self._path)
1950 1949 return self._wrappedctx[path].size()
1951 1950
1952 1951 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1953 1952 user=None, editor=None):
1954 1953 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1955 1954 committed.
1956 1955
1957 1956 ``text`` is the commit message.
1958 1957 ``parents`` (optional) are rev numbers.
1959 1958 """
1960 1959 # Default parents to the wrapped contexts' if not passed.
1961 1960 if parents is None:
1962 1961 parents = self._wrappedctx.parents()
1963 1962 if len(parents) == 1:
1964 1963 parents = (parents[0], None)
1965 1964
1966 1965 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1967 1966 if parents[1] is None:
1968 1967 parents = (self._repo[parents[0]], None)
1969 1968 else:
1970 1969 parents = (self._repo[parents[0]], self._repo[parents[1]])
1971 1970
1972 1971 files = self._cache.keys()
1973 1972 def getfile(repo, memctx, path):
1974 1973 if self._cache[path]['exists']:
1975 1974 return memfilectx(repo, memctx, path,
1976 1975 self._cache[path]['data'],
1977 1976 'l' in self._cache[path]['flags'],
1978 1977 'x' in self._cache[path]['flags'],
1979 1978 self._cache[path]['copied'])
1980 1979 else:
1981 1980 # Returning None, but including the path in `files`, is
1982 1981 # necessary for memctx to register a deletion.
1983 1982 return None
1984 1983 return memctx(self._repo, parents, text, files, getfile, date=date,
1985 1984 extra=extra, user=user, branch=branch, editor=editor)
1986 1985
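# Illustrative sketch (not part of the original module) of the in-memory
# commit flow this class supports, e.g. as used by in-memory rebase:
#
#     wctx = overlayworkingctx(repo)
#     wctx.setbase(repo['.'])
#     wctx.write('file.txt', b'new content')
#     mctx = wctx.tomemctx('commit message')
#     node = mctx.commit()        # memctx.commit() calls repo.commitctx()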
1987 1986 def isdirty(self, path):
1988 1987 return path in self._cache
1989 1988
1990 1989 def isempty(self):
1991 1990 # We need to discard any keys that are actually clean before the empty
1992 1991 # commit check.
1993 1992 self._compact()
1994 1993 return len(self._cache) == 0
1995 1994
1996 1995 def clean(self):
1997 1996 self._cache = {}
1998 1997
1999 1998 def _compact(self):
2000 1999 """Removes keys from the cache that are actually clean, by comparing
2001 2000 them with the underlying context.
2002 2001
2003 2002 This can occur during the merge process, e.g. by passing --tool :local
2004 2003 to resolve a conflict.
2005 2004 """
2006 2005 keys = []
2007 2006 for path in self._cache.keys():
2008 2007 cache = self._cache[path]
2009 2008 try:
2010 2009 underlying = self._wrappedctx[path]
2011 2010 if (underlying.data() == cache['data'] and
2012 2011 underlying.flags() == cache['flags']):
2013 2012 keys.append(path)
2014 2013 except error.ManifestLookupError:
2015 2014 # Path not in the underlying manifest (created).
2016 2015 continue
2017 2016
2018 2017 for path in keys:
2019 2018 del self._cache[path]
2020 2019 return keys
2021 2020
2022 2021 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2023 2022 self._cache[path] = {
2024 2023 'exists': exists,
2025 2024 'data': data,
2026 2025 'date': date,
2027 2026 'flags': flags,
2028 2027 'copied': None,
2029 2028 }
2030 2029
2031 2030 def filectx(self, path, filelog=None):
2032 2031 return overlayworkingfilectx(self._repo, path, parent=self,
2033 2032 filelog=filelog)
2034 2033
2035 2034 class overlayworkingfilectx(committablefilectx):
2036 2035 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2037 2036 cache, which can be flushed through later by calling ``flush()``."""
2038 2037
2039 2038 def __init__(self, repo, path, filelog=None, parent=None):
2040 2039 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2041 2040 parent)
2042 2041 self._repo = repo
2043 2042 self._parent = parent
2044 2043 self._path = path
2045 2044
2046 2045 def cmp(self, fctx):
2047 2046 return self.data() != fctx.data()
2048 2047
2049 2048 def changectx(self):
2050 2049 return self._parent
2051 2050
2052 2051 def data(self):
2053 2052 return self._parent.data(self._path)
2054 2053
2055 2054 def date(self):
2056 2055 return self._parent.filedate(self._path)
2057 2056
2058 2057 def exists(self):
2059 2058 return self.lexists()
2060 2059
2061 2060 def lexists(self):
2062 2061 return self._parent.exists(self._path)
2063 2062
2064 2063 def renamed(self):
2065 2064 path = self._parent.copydata(self._path)
2066 2065 if not path:
2067 2066 return None
2068 2067 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2069 2068
2070 2069 def size(self):
2071 2070 return self._parent.size(self._path)
2072 2071
2073 2072 def markcopied(self, origin):
2074 2073 self._parent.markcopied(self._path, origin)
2075 2074
2076 2075 def audit(self):
2077 2076 pass
2078 2077
2079 2078 def flags(self):
2080 2079 return self._parent.flags(self._path)
2081 2080
2082 2081 def setflags(self, islink, isexec):
2083 2082 return self._parent.setflags(self._path, islink, isexec)
2084 2083
2085 2084 def write(self, data, flags, backgroundclose=False, **kwargs):
2086 2085 return self._parent.write(self._path, data, flags, **kwargs)
2087 2086
2088 2087 def remove(self, ignoremissing=False):
2089 2088 return self._parent.remove(self._path)
2090 2089
2091 2090 def clearunknown(self):
2092 2091 pass
2093 2092
2094 2093 class workingcommitctx(workingctx):
2095 2094 """A workingcommitctx object makes access to data related to
2096 2095 the revision being committed convenient.
2097 2096
2098 2097 This hides changes in the working directory, if they aren't
2099 2098 committed in this context.
2100 2099 """
2101 2100 def __init__(self, repo, changes,
2102 2101 text="", user=None, date=None, extra=None):
2103 2102 super(workingctx, self).__init__(repo, text, user, date, extra,
2104 2103 changes)
2105 2104
2106 2105 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2107 2106 """Return matched files only in ``self._status``
2108 2107
2109 2108 Uncommitted files appear "clean" via this context, even if
2110 2109 they aren't actually so in the working directory.
2111 2110 """
2112 2111 if clean:
2113 2112 clean = [f for f in self._manifest if f not in self._changedset]
2114 2113 else:
2115 2114 clean = []
2116 2115 return scmutil.status([f for f in self._status.modified if match(f)],
2117 2116 [f for f in self._status.added if match(f)],
2118 2117 [f for f in self._status.removed if match(f)],
2119 2118 [], [], [], clean)
2120 2119
2121 2120 @propertycache
2122 2121 def _changedset(self):
2123 2122 """Return the set of files changed in this context
2124 2123 """
2125 2124 changed = set(self._status.modified)
2126 2125 changed.update(self._status.added)
2127 2126 changed.update(self._status.removed)
2128 2127 return changed
2129 2128
2130 2129 def makecachingfilectxfn(func):
2131 2130 """Create a filectxfn that caches based on the path.
2132 2131
2133 2132 We can't use util.cachefunc because it uses all arguments as the cache
2134 2133 key and this creates a cycle since the arguments include the repo and
2135 2134 memctx.
2136 2135 """
2137 2136 cache = {}
2138 2137
2139 2138 def getfilectx(repo, memctx, path):
2140 2139 if path not in cache:
2141 2140 cache[path] = func(repo, memctx, path)
2142 2141 return cache[path]
2143 2142
2144 2143 return getfilectx
2145 2144
2146 2145 def memfilefromctx(ctx):
2147 2146 """Given a context return a memfilectx for ctx[path]
2148 2147
2149 2148 This is a convenience method for building a memctx based on another
2150 2149 context.
2151 2150 """
2152 2151 def getfilectx(repo, memctx, path):
2153 2152 fctx = ctx[path]
2154 2153 # this is weird but apparently we only keep track of one parent
2155 2154 # (why not only store that instead of a tuple?)
2156 2155 copied = fctx.renamed()
2157 2156 if copied:
2158 2157 copied = copied[0]
2159 2158 return memfilectx(repo, memctx, path, fctx.data(),
2160 2159 islink=fctx.islink(), isexec=fctx.isexec(),
2161 2160 copied=copied)
2162 2161
2163 2162 return getfilectx
2164 2163
2165 2164 def memfilefrompatch(patchstore):
2166 2165 """Given a patch (e.g. patchstore object) return a memfilectx
2167 2166
2168 2167 This is a convenience method for building a memctx based on a patchstore.
2169 2168 """
2170 2169 def getfilectx(repo, memctx, path):
2171 2170 data, mode, copied = patchstore.getfile(path)
2172 2171 if data is None:
2173 2172 return None
2174 2173 islink, isexec = mode
2175 2174 return memfilectx(repo, memctx, path, data, islink=islink,
2176 2175 isexec=isexec, copied=copied)
2177 2176
2178 2177 return getfilectx
2179 2178
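# Illustrative note (editorial, not from the original source): both helpers
# above build a filectxfn for memctx. Passing a non-callable store or another
# context directly to memctx also works, because memctx.__init__ below wraps
# it with memfilefrompatch() or memfilefromctx() automatically.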
2180 2179 class memctx(committablectx):
2181 2180 """Use memctx to perform in-memory commits via localrepo.commitctx().
2182 2181
2183 2182 Revision information is supplied at initialization time, while
2184 2183 related file data is made available through a callback
2185 2184 mechanism. 'repo' is the current localrepo, 'parents' is a
2186 2185 sequence of two parent revision identifiers (pass None for every
2187 2186 missing parent), 'text' is the commit message and 'files' lists
2188 2187 names of files touched by the revision (normalized and relative to
2189 2188 repository root).
2190 2189
2191 2190 filectxfn(repo, memctx, path) is a callable receiving the
2192 2191 repository, the current memctx object and the normalized path of
2193 2192 the requested file, relative to the repository root. It is fired by the
2194 2193 commit function for every file in 'files', but the call order is
2195 2194 undefined. If the file is available in the revision being
2196 2195 committed (updated or added), filectxfn returns a memfilectx
2197 2196 object. If the file was removed, filectxfn return None for recent
2198 2197 object. If the file was removed, filectxfn returns None in recent
2199 2198 removed and the new file added with copy information (see
2200 2199 memfilectx).
2201 2200
2202 2201 user receives the committer name and defaults to current
2203 2202 repository username, date is the commit date in any format
2204 2203 supported by dateutil.parsedate() and defaults to current date, extra
2205 2204 is a dictionary of metadata or is left empty.
2206 2205 """
2207 2206
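# Illustrative example (not part of the original module): a minimal filectxfn
# that commits one added file; 'hello.txt' is a hypothetical path.
#
#     def getfilectx(repo, memctx, path):
#         if path == 'hello.txt':
#             return memfilectx(repo, memctx, path, b'hello\n')
#         return None             # anything else is treated as removed
#
#     ctx = memctx(repo, (repo['.'].node(), None), 'add hello',
#                  ['hello.txt'], getfilectx)
#     node = ctx.commit()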
2208 2207 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2209 2208 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2210 2209 # this field to determine what to do in filectxfn.
2211 2210 _returnnoneformissingfiles = True
2212 2211
2213 2212 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2214 2213 date=None, extra=None, branch=None, editor=False):
2215 2214 super(memctx, self).__init__(repo, text, user, date, extra)
2216 2215 self._rev = None
2217 2216 self._node = None
2218 2217 parents = [(p or nullid) for p in parents]
2219 2218 p1, p2 = parents
2220 2219 self._parents = [self._repo[p] for p in (p1, p2)]
2221 2220 files = sorted(set(files))
2222 2221 self._files = files
2223 2222 if branch is not None:
2224 2223 self._extra['branch'] = encoding.fromlocal(branch)
2225 2224 self.substate = {}
2226 2225
2227 2226 if isinstance(filectxfn, patch.filestore):
2228 2227 filectxfn = memfilefrompatch(filectxfn)
2229 2228 elif not callable(filectxfn):
2230 2229 # if store is not callable, wrap it in a function
2231 2230 filectxfn = memfilefromctx(filectxfn)
2232 2231
2233 2232 # memoizing increases performance for e.g. vcs convert scenarios.
2234 2233 self._filectxfn = makecachingfilectxfn(filectxfn)
2235 2234
2236 2235 if editor:
2237 2236 self._text = editor(self._repo, self, [])
2238 2237 self._repo.savecommitmessage(self._text)
2239 2238
2240 2239 def filectx(self, path, filelog=None):
2241 2240 """get a file context from the working directory
2242 2241
2243 2242 Returns None if file doesn't exist and should be removed."""
2244 2243 return self._filectxfn(self._repo, self, path)
2245 2244
2246 2245 def commit(self):
2247 2246 """commit context to the repo"""
2248 2247 return self._repo.commitctx(self)
2249 2248
2250 2249 @propertycache
2251 2250 def _manifest(self):
2252 2251 """generate a manifest based on the return values of filectxfn"""
2253 2252
2254 2253 # keep this simple for now; just worry about p1
2255 2254 pctx = self._parents[0]
2256 2255 man = pctx.manifest().copy()
2257 2256
2258 2257 for f in self._status.modified:
2259 2258 p1node = nullid
2260 2259 p2node = nullid
2261 2260 p = pctx[f].parents() # if file isn't in pctx, check p2?
2262 2261 if len(p) > 0:
2263 2262 p1node = p[0].filenode()
2264 2263 if len(p) > 1:
2265 2264 p2node = p[1].filenode()
2266 2265 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2267 2266
2268 2267 for f in self._status.added:
2269 2268 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2270 2269
2271 2270 for f in self._status.removed:
2272 2271 if f in man:
2273 2272 del man[f]
2274 2273
2275 2274 return man
2276 2275
2277 2276 @propertycache
2278 2277 def _status(self):
2279 2278 """Calculate exact status from ``files`` specified at construction
2280 2279 """
2281 2280 man1 = self.p1().manifest()
2282 2281 p2 = self._parents[1]
2283 2282 # "1 < len(self._parents)" can't be used for checking
2284 2283 # existence of the 2nd parent, because "memctx._parents" is
2285 2284 # explicitly initialized as a list whose length is always 2.
2286 2285 if p2.node() != nullid:
2287 2286 man2 = p2.manifest()
2288 2287 managing = lambda f: f in man1 or f in man2
2289 2288 else:
2290 2289 managing = lambda f: f in man1
2291 2290
2292 2291 modified, added, removed = [], [], []
2293 2292 for f in self._files:
2294 2293 if not managing(f):
2295 2294 added.append(f)
2296 2295 elif self[f]:
2297 2296 modified.append(f)
2298 2297 else:
2299 2298 removed.append(f)
2300 2299
2301 2300 return scmutil.status(modified, added, removed, [], [], [], [])
2302 2301
2303 2302 class memfilectx(committablefilectx):
2304 2303 """memfilectx represents an in-memory file to commit.
2305 2304
2306 2305 See memctx and committablefilectx for more details.
2307 2306 """
2308 2307 def __init__(self, repo, changectx, path, data, islink=False,
2309 2308 isexec=False, copied=None):
2310 2309 """
2311 2310 path is the normalized file path relative to repository root.
2312 2311 data is the file content as a string.
2313 2312 islink is True if the file is a symbolic link.
2314 2313 isexec is True if the file is executable.
2315 2314 copied is the source file path if current file was copied in the
2316 2315 revision being committed, or None."""
2317 2316 super(memfilectx, self).__init__(repo, path, None, changectx)
2318 2317 self._data = data
2319 2318 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2320 2319 self._copied = None
2321 2320 if copied:
2322 2321 self._copied = (copied, nullid)
2323 2322
2324 2323 def data(self):
2325 2324 return self._data
2326 2325
2327 2326 def remove(self, ignoremissing=False):
2328 2327 """wraps unlink for a repo's working directory"""
2329 2328 # need to figure out what to do here
2330 2329 del self._changectx[self._path]
2331 2330
2332 2331 def write(self, data, flags, **kwargs):
2333 2332 """wraps repo.wwrite"""
2334 2333 self._data = data
2335 2334
2336 2335 class overlayfilectx(committablefilectx):
2337 2336 """Like memfilectx but take an original filectx and optional parameters to
2338 2337 override parts of it. This is useful when fctx.data() is expensive (e.g. the
2339 2338 flag processor is expensive) and raw data, flags, and filenode could be
2340 2339 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2341 2340 """
2342 2341
2343 2342 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2344 2343 copied=None, ctx=None):
2345 2344 """originalfctx: filecontext to duplicate
2346 2345
2347 2346 datafunc: None or a function to override data (file content). It is a
2348 2347 function so the data can be computed lazily. path, flags, copied, ctx: None or an overridden value
2349 2348
2350 2349 copied could be (path, rev), or False. copied could also be just path,
2351 2350 and will be converted to (path, nullid). This simplifies some callers.
2352 2351 """
2353 2352
2354 2353 if path is None:
2355 2354 path = originalfctx.path()
2356 2355 if ctx is None:
2357 2356 ctx = originalfctx.changectx()
2358 2357 ctxmatch = lambda: True
2359 2358 else:
2360 2359 ctxmatch = lambda: ctx == originalfctx.changectx()
2361 2360
2362 2361 repo = originalfctx.repo()
2363 2362 flog = originalfctx.filelog()
2364 2363 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2365 2364
2366 2365 if copied is None:
2367 2366 copied = originalfctx.renamed()
2368 2367 copiedmatch = lambda: True
2369 2368 else:
2370 2369 if copied and not isinstance(copied, tuple):
2371 2370 # repo._filecommit will recalculate copyrev so nullid is okay
2372 2371 copied = (copied, nullid)
2373 2372 copiedmatch = lambda: copied == originalfctx.renamed()
2374 2373
2375 2374 # When data, copied (could affect data), ctx (could affect filelog
2376 2375 # parents) are not overridden, rawdata, rawflags, and filenode may be
2377 2376 # reused (repo._filecommit should double check filelog parents).
2378 2377 #
2379 2378 # path, flags are not hashed in filelog (but in manifestlog) so they do
2380 2379 # not affect reusability here.
2381 2380 #
2382 2381 # If ctx or copied is overridden to the same value as in originalfctx,
2383 2382 # it is still considered reusable. originalfctx.renamed() may be a bit
2384 2383 # expensive so it's not called unless necessary. Assuming datafunc is
2385 2384 # always expensive, do not call it for this "reusable" test.
2386 2385 reusable = datafunc is None and ctxmatch() and copiedmatch()
2387 2386
2388 2387 if datafunc is None:
2389 2388 datafunc = originalfctx.data
2390 2389 if flags is None:
2391 2390 flags = originalfctx.flags()
2392 2391
2393 2392 self._datafunc = datafunc
2394 2393 self._flags = flags
2395 2394 self._copied = copied
2396 2395
2397 2396 if reusable:
2398 2397 # copy extra fields from originalfctx
2399 2398 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2400 2399 for attr_ in attrs:
2401 2400 if util.safehasattr(originalfctx, attr_):
2402 2401 setattr(self, attr_, getattr(originalfctx, attr_))
2403 2402
2404 2403 def data(self):
2405 2404 return self._datafunc()
2406 2405
2407 2406 class metadataonlyctx(committablectx):
2408 2407 """Like memctx but it reuses the manifest of a different commit.
2409 2408 Intended to be used by lightweight operations that are creating
2410 2409 metadata-only changes.
2411 2410
2412 2411 Revision information is supplied at initialization time. 'repo' is the
2413 2412 current localrepo, 'originalctx' is the original revision whose manifest
2414 2413 we're reusing, 'parents' is a sequence of two parent revision identifiers
2415 2414 (pass None for every missing parent), and 'text' is the commit message.
2416 2415
2417 2416 user receives the committer name and defaults to current repository
2418 2417 username, date is the commit date in any format supported by
2419 2418 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2420 2419 metadata or is left empty.
2421 2420 """
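# Illustrative sketch (not part of the original module): rewriting only the
# description of the working directory parent while reusing its manifest.
#
#     old = repo['.']
#     new = metadataonlyctx(repo, old, text='better message',
#                           user=old.user(), date=old.date())
#     newnode = new.commit()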
2422 2421 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2423 2422 date=None, extra=None, editor=False):
2424 2423 if text is None:
2425 2424 text = originalctx.description()
2426 2425 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2427 2426 self._rev = None
2428 2427 self._node = None
2429 2428 self._originalctx = originalctx
2430 2429 self._manifestnode = originalctx.manifestnode()
2431 2430 if parents is None:
2432 2431 parents = originalctx.parents()
2433 2432 else:
2434 2433 parents = [repo[p] for p in parents if p is not None]
2435 2434 parents = parents[:]
2436 2435 while len(parents) < 2:
2437 2436 parents.append(repo[nullid])
2438 2437 p1, p2 = self._parents = parents
2439 2438
2440 2439 # sanity check to ensure that the reused manifest parents are
2441 2440 # manifests of our commit parents
2442 2441 mp1, mp2 = self.manifestctx().parents
2443 2442 if p1 != nullid and p1.manifestnode() != mp1:
2444 2443 raise RuntimeError('can\'t reuse the manifest: '
2445 2444 'its p1 doesn\'t match the new ctx p1')
2446 2445 if p2 != nullid and p2.manifestnode() != mp2:
2447 2446 raise RuntimeError('can\'t reuse the manifest: '
2448 2447 'its p2 doesn\'t match the new ctx p2')
2449 2448
2450 2449 self._files = originalctx.files()
2451 2450 self.substate = {}
2452 2451
2453 2452 if editor:
2454 2453 self._text = editor(self._repo, self, [])
2455 2454 self._repo.savecommitmessage(self._text)
2456 2455
2457 2456 def manifestnode(self):
2458 2457 return self._manifestnode
2459 2458
2460 2459 @property
2461 2460 def _manifestctx(self):
2462 2461 return self._repo.manifestlog[self._manifestnode]
2463 2462
2464 2463 def filectx(self, path, filelog=None):
2465 2464 return self._originalctx.filectx(path, filelog=filelog)
2466 2465
2467 2466 def commit(self):
2468 2467 """commit context to the repo"""
2469 2468 return self._repo.commitctx(self)
2470 2469
2471 2470 @property
2472 2471 def _manifest(self):
2473 2472 return self._originalctx.manifest()
2474 2473
2475 2474 @propertycache
2476 2475 def _status(self):
2477 2476 """Calculate exact status from ``files`` specified in the ``origctx``
2478 2477 and parents manifests.
2479 2478 """
2480 2479 man1 = self.p1().manifest()
2481 2480 p2 = self._parents[1]
2482 2481 # "1 < len(self._parents)" can't be used for checking
2483 2482 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2484 2483 # explicitly initialized as a list whose length is always 2.
2485 2484 if p2.node() != nullid:
2486 2485 man2 = p2.manifest()
2487 2486 managing = lambda f: f in man1 or f in man2
2488 2487 else:
2489 2488 managing = lambda f: f in man1
2490 2489
2491 2490 modified, added, removed = [], [], []
2492 2491 for f in self._files:
2493 2492 if not managing(f):
2494 2493 added.append(f)
2495 2494 elif f in self:
2496 2495 modified.append(f)
2497 2496 else:
2498 2497 removed.append(f)
2499 2498
2500 2499 return scmutil.status(modified, added, removed, [], [], [], [])
2501 2500
2502 2501 class arbitraryfilectx(object):
2503 2502 """Allows you to use filectx-like functions on a file in an arbitrary
2504 2503 location on disk, possibly not in the working directory.
2505 2504 """
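# Illustrative usage (assumption based on the docstring above): comparing a
# file at an arbitrary location on disk with a working directory file, as
# contrib/simplemerge-style callers do; '/tmp/backup' is a hypothetical path.
#
#     backup = arbitraryfilectx('/tmp/backup', repo=repo)
#     changed = backup.cmp(repo[None]['somefile'])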
2506 2505 def __init__(self, path, repo=None):
2507 2506 # Repo is optional because contrib/simplemerge uses this class.
2508 2507 self._repo = repo
2509 2508 self._path = path
2510 2509
2511 2510 def cmp(self, fctx):
2512 2511 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2513 2512 # path if either side is a symlink.
2514 2513 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2515 2514 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2516 2515 # Add a fast-path for merge if both sides are disk-backed.
2517 2516 # Note that filecmp uses the opposite return values (True if same)
2518 2517 # from our cmp functions (True if different).
2519 2518 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2520 2519 return self.data() != fctx.data()
2521 2520
2522 2521 def path(self):
2523 2522 return self._path
2524 2523
2525 2524 def flags(self):
2526 2525 return ''
2527 2526
2528 2527 def data(self):
2529 2528 return util.readfile(self._path)
2530 2529
2531 2530 def decodeddata(self):
2532 2531 with open(self._path, "rb") as f:
2533 2532 return f.read()
2534 2533
2535 2534 def remove(self):
2536 2535 util.unlink(self._path)
2537 2536
2538 2537 def write(self, data, flags, **kwargs):
2539 2538 assert not flags
2540 2539 with open(self._path, "w") as f:
2541 2540 f.write(data)