context: remove unneeded alias of diffopts
Yuya Nishihara
r38601:7f4bf811 default
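
A minimal usage sketch of basectx.diff() for context (not part of the changeset; hg.repository, ui.load and patch.difffeatureopts are assumed entry points from the wider codebase, and a local repository in the current directory is assumed): callers build a diffopts object themselves and pass it via the opts argument, which diff() forwards to patch.diff() unchanged.

    from mercurial import hg, patch, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')                 # assumed: existing local repo
    ctx = repo[b'.']                               # working directory parent
    diffopts = patch.difffeatureopts(ui, git=True) # build diff options up front
    for chunk in ctx.diff(opts=diffopts):          # opts reaches patch.diff() as-is
        ui.write(chunk)
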
@@ -1,2542 +1,2540
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirfilenodeids,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 sparse,
42 42 subrepo,
43 43 subrepoutil,
44 44 util,
45 45 )
46 46 from .utils import (
47 47 dateutil,
48 48 stringutil,
49 49 )
50 50
51 51 propertycache = util.propertycache
52 52
53 53 class basectx(object):
54 54 """A basectx object represents the common logic for its children:
55 55 changectx: read-only context that is already present in the repo,
56 56 workingctx: a context that represents the working directory and can
57 57 be committed,
58 58 memctx: a context that represents changes in-memory and can also
59 59 be committed."""
60 60
61 61 def __init__(self, repo):
62 62 self._repo = repo
63 63
64 64 def __bytes__(self):
65 65 return short(self.node())
66 66
67 67 __str__ = encoding.strmethod(__bytes__)
68 68
69 69 def __repr__(self):
70 70 return r"<%s %s>" % (type(self).__name__, str(self))
71 71
72 72 def __eq__(self, other):
73 73 try:
74 74 return type(self) == type(other) and self._rev == other._rev
75 75 except AttributeError:
76 76 return False
77 77
78 78 def __ne__(self, other):
79 79 return not (self == other)
80 80
81 81 def __contains__(self, key):
82 82 return key in self._manifest
83 83
84 84 def __getitem__(self, key):
85 85 return self.filectx(key)
86 86
87 87 def __iter__(self):
88 88 return iter(self._manifest)
89 89
90 90 def _buildstatusmanifest(self, status):
91 91 """Builds a manifest that includes the given status results, if this is
92 92 a working copy context. For non-working copy contexts, it just returns
93 93 the normal manifest."""
94 94 return self.manifest()
95 95
96 96 def _matchstatus(self, other, match):
97 97 """This internal method provides a way for child objects to override the
98 98 match operator.
99 99 """
100 100 return match
101 101
102 102 def _buildstatus(self, other, s, match, listignored, listclean,
103 103 listunknown):
104 104 """build a status with respect to another context"""
105 105 # Load earliest manifest first for caching reasons. More specifically,
106 106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 108 # 1000 and cache it so that when you read 1001, we just need to apply a
109 109 # delta to what's in the cache. So that's one full reconstruction + one
110 110 # delta application.
111 111 mf2 = None
112 112 if self.rev() is not None and self.rev() < other.rev():
113 113 mf2 = self._buildstatusmanifest(s)
114 114 mf1 = other._buildstatusmanifest(s)
115 115 if mf2 is None:
116 116 mf2 = self._buildstatusmanifest(s)
117 117
118 118 modified, added = [], []
119 119 removed = []
120 120 clean = []
121 121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 122 deletedset = set(deleted)
123 123 d = mf1.diff(mf2, match=match, clean=listclean)
124 124 for fn, value in d.iteritems():
125 125 if fn in deletedset:
126 126 continue
127 127 if value is None:
128 128 clean.append(fn)
129 129 continue
130 130 (node1, flag1), (node2, flag2) = value
131 131 if node1 is None:
132 132 added.append(fn)
133 133 elif node2 is None:
134 134 removed.append(fn)
135 135 elif flag1 != flag2:
136 136 modified.append(fn)
137 137 elif node2 not in wdirfilenodeids:
138 138 # When comparing files between two commits, we save time by
139 139 # not comparing the file contents when the nodeids differ.
140 140 # Note that this means we incorrectly report a reverted change
141 141 # to a file as a modification.
142 142 modified.append(fn)
143 143 elif self[fn].cmp(other[fn]):
144 144 modified.append(fn)
145 145 else:
146 146 clean.append(fn)
147 147
148 148 if removed:
149 149 # need to filter files if they are already reported as removed
150 150 unknown = [fn for fn in unknown if fn not in mf1 and
151 151 (not match or match(fn))]
152 152 ignored = [fn for fn in ignored if fn not in mf1 and
153 153 (not match or match(fn))]
154 154 # if they're deleted, don't report them as removed
155 155 removed = [fn for fn in removed if fn not in deletedset]
156 156
157 157 return scmutil.status(modified, added, removed, deleted, unknown,
158 158 ignored, clean)
159 159
160 160 @propertycache
161 161 def substate(self):
162 162 return subrepoutil.state(self, self._repo.ui)
163 163
164 164 def subrev(self, subpath):
165 165 return self.substate[subpath][1]
166 166
167 167 def rev(self):
168 168 return self._rev
169 169 def node(self):
170 170 return self._node
171 171 def hex(self):
172 172 return hex(self.node())
173 173 def manifest(self):
174 174 return self._manifest
175 175 def manifestctx(self):
176 176 return self._manifestctx
177 177 def repo(self):
178 178 return self._repo
179 179 def phasestr(self):
180 180 return phases.phasenames[self.phase()]
181 181 def mutable(self):
182 182 return self.phase() > phases.public
183 183
184 184 def getfileset(self, expr):
185 185 return fileset.getfileset(self, expr)
186 186
187 187 def obsolete(self):
188 188 """True if the changeset is obsolete"""
189 189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
190 190
191 191 def extinct(self):
192 192 """True if the changeset is extinct"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
194 194
195 195 def orphan(self):
196 196 """True if the changeset is not obsolete but it's ancestor are"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
198 198
199 199 def phasedivergent(self):
200 200 """True if the changeset try to be a successor of a public changeset
201 201
202 202 Only non-public and non-obsolete changesets may be bumped.
203 203 """
204 204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
205 205
206 206 def contentdivergent(self):
207 207 """Is a successors of a changeset with multiple possible successors set
208 208
209 209 Only non-public and non-obsolete changesets may be divergent.
210 210 """
211 211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
212 212
213 213 def isunstable(self):
214 214 """True if the changeset is either unstable, bumped or divergent"""
215 215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216 216
217 217 def instabilities(self):
218 218 """return the list of instabilities affecting this changeset.
219 219
220 220 Instabilities are returned as strings. Possible values are:
221 221 - orphan,
222 222 - phase-divergent,
223 223 - content-divergent.
224 224 """
225 225 instabilities = []
226 226 if self.orphan():
227 227 instabilities.append('orphan')
228 228 if self.phasedivergent():
229 229 instabilities.append('phase-divergent')
230 230 if self.contentdivergent():
231 231 instabilities.append('content-divergent')
232 232 return instabilities
233 233
234 234 def parents(self):
235 235 """return contexts for each parent changeset"""
236 236 return self._parents
237 237
238 238 def p1(self):
239 239 return self._parents[0]
240 240
241 241 def p2(self):
242 242 parents = self._parents
243 243 if len(parents) == 2:
244 244 return parents[1]
245 245 return changectx(self._repo, nullrev)
246 246
247 247 def _fileinfo(self, path):
248 248 if r'_manifest' in self.__dict__:
249 249 try:
250 250 return self._manifest[path], self._manifest.flags(path)
251 251 except KeyError:
252 252 raise error.ManifestLookupError(self._node, path,
253 253 _('not found in manifest'))
254 254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 255 if path in self._manifestdelta:
256 256 return (self._manifestdelta[path],
257 257 self._manifestdelta.flags(path))
258 258 mfl = self._repo.manifestlog
259 259 try:
260 260 node, flag = mfl[self._changeset.manifest].find(path)
261 261 except KeyError:
262 262 raise error.ManifestLookupError(self._node, path,
263 263 _('not found in manifest'))
264 264
265 265 return node, flag
266 266
267 267 def filenode(self, path):
268 268 return self._fileinfo(path)[0]
269 269
270 270 def flags(self, path):
271 271 try:
272 272 return self._fileinfo(path)[1]
273 273 except error.LookupError:
274 274 return ''
275 275
276 276 def sub(self, path, allowcreate=True):
277 277 '''return a subrepo for the stored revision of path, never wdir()'''
278 278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
279 279
280 280 def nullsub(self, path, pctx):
281 281 return subrepo.nullsubrepo(self, path, pctx)
282 282
283 283 def workingsub(self, path):
284 284 '''return a subrepo for the stored revision, or wdir if this is a wdir
285 285 context.
286 286 '''
287 287 return subrepo.subrepo(self, path, allowwdir=True)
288 288
289 289 def match(self, pats=None, include=None, exclude=None, default='glob',
290 290 listsubrepos=False, badfn=None):
291 291 r = self._repo
292 292 return matchmod.match(r.root, r.getcwd(), pats,
293 293 include, exclude, default,
294 294 auditor=r.nofsauditor, ctx=self,
295 295 listsubrepos=listsubrepos, badfn=badfn)
296 296
297 297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
298 298 losedatafn=None, prefix='', relroot='', copy=None,
299 299 hunksfilterfn=None):
300 300 """Returns a diff generator for the given contexts and matcher"""
301 301 if ctx2 is None:
302 302 ctx2 = self.p1()
303 303 if ctx2 is not None:
304 304 ctx2 = self._repo[ctx2]
305
306 diffopts = opts
307 305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
308 opts=diffopts, losedatafn=losedatafn, prefix=prefix,
306 opts=opts, losedatafn=losedatafn, prefix=prefix,
309 307 relroot=relroot, copy=copy,
310 308 hunksfilterfn=hunksfilterfn)
311 309
312 310 def dirs(self):
313 311 return self._manifest.dirs()
314 312
315 313 def hasdir(self, dir):
316 314 return self._manifest.hasdir(dir)
317 315
318 316 def status(self, other=None, match=None, listignored=False,
319 317 listclean=False, listunknown=False, listsubrepos=False):
320 318 """return status of files between two nodes or node and working
321 319 directory.
322 320
323 321 If other is None, compare this node with working directory.
324 322
325 323 returns (modified, added, removed, deleted, unknown, ignored, clean)
326 324 """
327 325
328 326 ctx1 = self
329 327 ctx2 = self._repo[other]
330 328
331 329 # This next code block is, admittedly, fragile logic that tests for
332 330 # reversing the contexts and wouldn't need to exist if it weren't for
333 331 # the fast (and common) code path of comparing the working directory
334 332 # with its first parent.
335 333 #
336 334 # What we're aiming for here is the ability to call:
337 335 #
338 336 # workingctx.status(parentctx)
339 337 #
340 338 # If we always built the manifest for each context and compared those,
341 339 # then we'd be done. But the special case of the above call means we
342 340 # just copy the manifest of the parent.
343 341 reversed = False
344 342 if (not isinstance(ctx1, changectx)
345 343 and isinstance(ctx2, changectx)):
346 344 reversed = True
347 345 ctx1, ctx2 = ctx2, ctx1
348 346
349 347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
350 348 match = ctx2._matchstatus(ctx1, match)
351 349 r = scmutil.status([], [], [], [], [], [], [])
352 350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 351 listunknown)
354 352
355 353 if reversed:
356 354 # Reverse added and removed. Clear deleted, unknown and ignored as
357 355 # these make no sense to reverse.
358 356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 357 r.clean)
360 358
361 359 if listsubrepos:
362 360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 361 try:
364 362 rev2 = ctx2.subrev(subpath)
365 363 except KeyError:
366 364 # A subrepo that existed in node1 was deleted between
367 365 # node1 and node2 (inclusive). Thus, ctx2's substate
368 366 # won't contain that subpath. The best we can do is ignore it.
369 367 rev2 = None
370 368 submatch = matchmod.subdirmatcher(subpath, match)
371 369 s = sub.status(rev2, match=submatch, ignored=listignored,
372 370 clean=listclean, unknown=listunknown,
373 371 listsubrepos=True)
374 372 for rfiles, sfiles in zip(r, s):
375 373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376 374
377 375 for l in r:
378 376 l.sort()
379 377
380 378 return r
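# Illustrative usage sketch, not part of the diff above; assumes 'repo' is an
# already-open localrepository. The common fast path described in the comments
# is comparing the working directory with its first parent:
wctx = repo[None]                            # workingctx
st = wctx.status(wctx.p1(), listclean=True)
# st is a scmutil.status tuple: (modified, added, removed, deleted, unknown,
# ignored, clean)
for f in st.modified:
    repo.ui.write(b'M %s\n' % f)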
381 379
382 380 class changectx(basectx):
383 381 """A changecontext object makes access to data related to a particular
384 382 changeset convenient. It represents a read-only context already present in
385 383 the repo."""
386 384 def __init__(self, repo, changeid='.'):
387 385 """changeid is a revision number, node, or tag"""
388 386 super(changectx, self).__init__(repo)
389 387
390 388 try:
391 389 if isinstance(changeid, int):
392 390 self._node = repo.changelog.node(changeid)
393 391 self._rev = changeid
394 392 return
395 393 elif changeid == 'null':
396 394 self._node = nullid
397 395 self._rev = nullrev
398 396 return
399 397 elif changeid == 'tip':
400 398 self._node = repo.changelog.tip()
401 399 self._rev = repo.changelog.rev(self._node)
402 400 return
403 401 elif (changeid == '.'
404 402 or repo.local() and changeid == repo.dirstate.p1()):
405 403 # this is a hack to delay/avoid loading obsmarkers
406 404 # when we know that '.' won't be hidden
407 405 self._node = repo.dirstate.p1()
408 406 self._rev = repo.unfiltered().changelog.rev(self._node)
409 407 return
410 408 elif len(changeid) == 20:
411 409 try:
412 410 self._node = changeid
413 411 self._rev = repo.changelog.rev(changeid)
414 412 return
415 413 except error.FilteredLookupError:
416 414 raise
417 415 except LookupError:
418 416 # check if it might have come from damaged dirstate
419 417 #
420 418 # XXX we could avoid the unfiltered if we had a recognizable
421 419 # exception for filtered changeset access
422 420 if (repo.local()
423 421 and changeid in repo.unfiltered().dirstate.parents()):
424 422 msg = _("working directory has unknown parent '%s'!")
425 423 raise error.Abort(msg % short(changeid))
426 424 changeid = hex(changeid) # for the error message
427 425
428 426 elif len(changeid) == 40:
429 427 try:
430 428 self._node = bin(changeid)
431 429 self._rev = repo.changelog.rev(self._node)
432 430 return
433 431 except error.FilteredLookupError:
434 432 raise
435 433 except (TypeError, LookupError):
436 434 pass
437 435
438 436 # lookup failed
439 437 except (error.FilteredIndexError, error.FilteredLookupError):
440 438 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
441 439 % pycompat.bytestr(changeid))
442 440 except error.FilteredRepoLookupError:
443 441 raise
444 442 except IndexError:
445 443 pass
446 444 raise error.RepoLookupError(
447 445 _("unknown revision '%s'") % changeid)
448 446
449 447 def __hash__(self):
450 448 try:
451 449 return hash(self._rev)
452 450 except AttributeError:
453 451 return id(self)
454 452
455 453 def __nonzero__(self):
456 454 return self._rev != nullrev
457 455
458 456 __bool__ = __nonzero__
459 457
460 458 @propertycache
461 459 def _changeset(self):
462 460 return self._repo.changelog.changelogrevision(self.rev())
463 461
464 462 @propertycache
465 463 def _manifest(self):
466 464 return self._manifestctx.read()
467 465
468 466 @property
469 467 def _manifestctx(self):
470 468 return self._repo.manifestlog[self._changeset.manifest]
471 469
472 470 @propertycache
473 471 def _manifestdelta(self):
474 472 return self._manifestctx.readdelta()
475 473
476 474 @propertycache
477 475 def _parents(self):
478 476 repo = self._repo
479 477 p1, p2 = repo.changelog.parentrevs(self._rev)
480 478 if p2 == nullrev:
481 479 return [changectx(repo, p1)]
482 480 return [changectx(repo, p1), changectx(repo, p2)]
483 481
484 482 def changeset(self):
485 483 c = self._changeset
486 484 return (
487 485 c.manifest,
488 486 c.user,
489 487 c.date,
490 488 c.files,
491 489 c.description,
492 490 c.extra,
493 491 )
494 492 def manifestnode(self):
495 493 return self._changeset.manifest
496 494
497 495 def user(self):
498 496 return self._changeset.user
499 497 def date(self):
500 498 return self._changeset.date
501 499 def files(self):
502 500 return self._changeset.files
503 501 def description(self):
504 502 return self._changeset.description
505 503 def branch(self):
506 504 return encoding.tolocal(self._changeset.extra.get("branch"))
507 505 def closesbranch(self):
508 506 return 'close' in self._changeset.extra
509 507 def extra(self):
510 508 """Return a dict of extra information."""
511 509 return self._changeset.extra
512 510 def tags(self):
513 511 """Return a list of byte tag names"""
514 512 return self._repo.nodetags(self._node)
515 513 def bookmarks(self):
516 514 """Return a list of byte bookmark names."""
517 515 return self._repo.nodebookmarks(self._node)
518 516 def phase(self):
519 517 return self._repo._phasecache.phase(self._repo, self._rev)
520 518 def hidden(self):
521 519 return self._rev in repoview.filterrevs(self._repo, 'visible')
522 520
523 521 def isinmemory(self):
524 522 return False
525 523
526 524 def children(self):
527 525 """return list of changectx contexts for each child changeset.
528 526
529 527 This returns only the immediate child changesets. Use descendants() to
530 528 recursively walk children.
531 529 """
532 530 c = self._repo.changelog.children(self._node)
533 531 return [changectx(self._repo, x) for x in c]
534 532
535 533 def ancestors(self):
536 534 for a in self._repo.changelog.ancestors([self._rev]):
537 535 yield changectx(self._repo, a)
538 536
539 537 def descendants(self):
540 538 """Recursively yield all children of the changeset.
541 539
542 540 For just the immediate children, use children()
543 541 """
544 542 for d in self._repo.changelog.descendants([self._rev]):
545 543 yield changectx(self._repo, d)
546 544
547 545 def filectx(self, path, fileid=None, filelog=None):
548 546 """get a file context from this changeset"""
549 547 if fileid is None:
550 548 fileid = self.filenode(path)
551 549 return filectx(self._repo, path, fileid=fileid,
552 550 changectx=self, filelog=filelog)
553 551
554 552 def ancestor(self, c2, warn=False):
555 553 """return the "best" ancestor context of self and c2
556 554
557 555 If there are multiple candidates, it will show a message and check
558 556 merge.preferancestor configuration before falling back to the
559 557 revlog ancestor."""
560 558 # deal with workingctxs
561 559 n2 = c2._node
562 560 if n2 is None:
563 561 n2 = c2._parents[0]._node
564 562 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
565 563 if not cahs:
566 564 anc = nullid
567 565 elif len(cahs) == 1:
568 566 anc = cahs[0]
569 567 else:
570 568 # experimental config: merge.preferancestor
571 569 for r in self._repo.ui.configlist('merge', 'preferancestor'):
572 570 try:
573 571 ctx = scmutil.revsymbol(self._repo, r)
574 572 except error.RepoLookupError:
575 573 continue
576 574 anc = ctx.node()
577 575 if anc in cahs:
578 576 break
579 577 else:
580 578 anc = self._repo.changelog.ancestor(self._node, n2)
581 579 if warn:
582 580 self._repo.ui.status(
583 581 (_("note: using %s as ancestor of %s and %s\n") %
584 582 (short(anc), short(self._node), short(n2))) +
585 583 ''.join(_(" alternatively, use --config "
586 584 "merge.preferancestor=%s\n") %
587 585 short(n) for n in sorted(cahs) if n != anc))
588 586 return changectx(self._repo, anc)
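# Illustrative sketch, not part of the diff above; 'repo', 'c1' and 'c2' are
# assumed to be an open localrepository and two changectx objects, and the
# revision below is a placeholder. When several common ancestor heads exist,
# the experimental merge.preferancestor option consulted above can pick one;
# unknown values are skipped and the revlog ancestor is used as a fallback.
repo.ui.setconfig(b'merge', b'preferancestor', b'1234abcd', b'example')
anc = c1.ancestor(c2, warn=True)   # warn=True emits the "note: using ..." hint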
589 587
590 588 def descendant(self, other):
591 589 """True if other is descendant of this changeset"""
592 590 return self._repo.changelog.descendant(self._rev, other._rev)
593 591
594 592 def walk(self, match):
595 593 '''Generates matching file names.'''
596 594
597 595 # Wrap match.bad method to have message with nodeid
598 596 def bad(fn, msg):
599 597 # The manifest doesn't know about subrepos, so don't complain about
600 598 # paths into valid subrepos.
601 599 if any(fn == s or fn.startswith(s + '/')
602 600 for s in self.substate):
603 601 return
604 602 match.bad(fn, _('no such file in rev %s') % self)
605 603
606 604 m = matchmod.badmatch(match, bad)
607 605 return self._manifest.walk(m)
608 606
609 607 def matches(self, match):
610 608 return self.walk(match)
611 609
612 610 class basefilectx(object):
613 611 """A filecontext object represents the common logic for its children:
614 612 filectx: read-only access to a filerevision that is already present
615 613 in the repo,
616 614 workingfilectx: a filecontext that represents files from the working
617 615 directory,
618 616 memfilectx: a filecontext that represents files in-memory,
619 617 overlayfilectx: duplicate another filecontext with some fields overridden.
620 618 """
621 619 @propertycache
622 620 def _filelog(self):
623 621 return self._repo.file(self._path)
624 622
625 623 @propertycache
626 624 def _changeid(self):
627 625 if r'_changeid' in self.__dict__:
628 626 return self._changeid
629 627 elif r'_changectx' in self.__dict__:
630 628 return self._changectx.rev()
631 629 elif r'_descendantrev' in self.__dict__:
632 630 # this file context was created from a revision with a known
633 631 # descendant, we can (lazily) correct for linkrev aliases
634 632 return self._adjustlinkrev(self._descendantrev)
635 633 else:
636 634 return self._filelog.linkrev(self._filerev)
637 635
638 636 @propertycache
639 637 def _filenode(self):
640 638 if r'_fileid' in self.__dict__:
641 639 return self._filelog.lookup(self._fileid)
642 640 else:
643 641 return self._changectx.filenode(self._path)
644 642
645 643 @propertycache
646 644 def _filerev(self):
647 645 return self._filelog.rev(self._filenode)
648 646
649 647 @propertycache
650 648 def _repopath(self):
651 649 return self._path
652 650
653 651 def __nonzero__(self):
654 652 try:
655 653 self._filenode
656 654 return True
657 655 except error.LookupError:
658 656 # file is missing
659 657 return False
660 658
661 659 __bool__ = __nonzero__
662 660
663 661 def __bytes__(self):
664 662 try:
665 663 return "%s@%s" % (self.path(), self._changectx)
666 664 except error.LookupError:
667 665 return "%s@???" % self.path()
668 666
669 667 __str__ = encoding.strmethod(__bytes__)
670 668
671 669 def __repr__(self):
672 670 return r"<%s %s>" % (type(self).__name__, str(self))
673 671
674 672 def __hash__(self):
675 673 try:
676 674 return hash((self._path, self._filenode))
677 675 except AttributeError:
678 676 return id(self)
679 677
680 678 def __eq__(self, other):
681 679 try:
682 680 return (type(self) == type(other) and self._path == other._path
683 681 and self._filenode == other._filenode)
684 682 except AttributeError:
685 683 return False
686 684
687 685 def __ne__(self, other):
688 686 return not (self == other)
689 687
690 688 def filerev(self):
691 689 return self._filerev
692 690 def filenode(self):
693 691 return self._filenode
694 692 @propertycache
695 693 def _flags(self):
696 694 return self._changectx.flags(self._path)
697 695 def flags(self):
698 696 return self._flags
699 697 def filelog(self):
700 698 return self._filelog
701 699 def rev(self):
702 700 return self._changeid
703 701 def linkrev(self):
704 702 return self._filelog.linkrev(self._filerev)
705 703 def node(self):
706 704 return self._changectx.node()
707 705 def hex(self):
708 706 return self._changectx.hex()
709 707 def user(self):
710 708 return self._changectx.user()
711 709 def date(self):
712 710 return self._changectx.date()
713 711 def files(self):
714 712 return self._changectx.files()
715 713 def description(self):
716 714 return self._changectx.description()
717 715 def branch(self):
718 716 return self._changectx.branch()
719 717 def extra(self):
720 718 return self._changectx.extra()
721 719 def phase(self):
722 720 return self._changectx.phase()
723 721 def phasestr(self):
724 722 return self._changectx.phasestr()
725 723 def obsolete(self):
726 724 return self._changectx.obsolete()
727 725 def instabilities(self):
728 726 return self._changectx.instabilities()
729 727 def manifest(self):
730 728 return self._changectx.manifest()
731 729 def changectx(self):
732 730 return self._changectx
733 731 def renamed(self):
734 732 return self._copied
735 733 def repo(self):
736 734 return self._repo
737 735 def size(self):
738 736 return len(self.data())
739 737
740 738 def path(self):
741 739 return self._path
742 740
743 741 def isbinary(self):
744 742 try:
745 743 return stringutil.binary(self.data())
746 744 except IOError:
747 745 return False
748 746 def isexec(self):
749 747 return 'x' in self.flags()
750 748 def islink(self):
751 749 return 'l' in self.flags()
752 750
753 751 def isabsent(self):
754 752 """whether this filectx represents a file not in self._changectx
755 753
756 754 This is mainly for merge code to detect change/delete conflicts. This is
757 755 expected to be True for all subclasses of basectx."""
758 756 return False
759 757
760 758 _customcmp = False
761 759 def cmp(self, fctx):
762 760 """compare with other file context
763 761
764 762 returns True if different than fctx.
765 763 """
766 764 if fctx._customcmp:
767 765 return fctx.cmp(self)
768 766
769 767 if (fctx._filenode is None
770 768 and (self._repo._encodefilterpats
771 769 # if file data starts with '\1\n', empty metadata block is
772 770 # prepended, which adds 4 bytes to filelog.size().
773 771 or self.size() - 4 == fctx.size())
774 772 or self.size() == fctx.size()):
775 773 return self._filelog.cmp(self._filenode, fctx.data())
776 774
777 775 return True
778 776
779 777 def _adjustlinkrev(self, srcrev, inclusive=False):
780 778 """return the first ancestor of <srcrev> introducing <fnode>
781 779
782 780 If the linkrev of the file revision does not point to an ancestor of
783 781 srcrev, we'll walk down the ancestors until we find one introducing
784 782 this file revision.
785 783
786 784 :srcrev: the changeset revision we search ancestors from
787 785 :inclusive: if true, the src revision will also be checked
788 786 """
789 787 repo = self._repo
790 788 cl = repo.unfiltered().changelog
791 789 mfl = repo.manifestlog
792 790 # fetch the linkrev
793 791 lkr = self.linkrev()
794 792 # hack to reuse ancestor computation when searching for renames
795 793 memberanc = getattr(self, '_ancestrycontext', None)
796 794 iteranc = None
797 795 if srcrev is None:
798 796 # wctx case, used by workingfilectx during mergecopy
799 797 revs = [p.rev() for p in self._repo[None].parents()]
800 798 inclusive = True # we skipped the real (revless) source
801 799 else:
802 800 revs = [srcrev]
803 801 if memberanc is None:
804 802 memberanc = iteranc = cl.ancestors(revs, lkr,
805 803 inclusive=inclusive)
806 804 # check if this linkrev is an ancestor of srcrev
807 805 if lkr not in memberanc:
808 806 if iteranc is None:
809 807 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
810 808 fnode = self._filenode
811 809 path = self._path
812 810 for a in iteranc:
813 811 ac = cl.read(a) # get changeset data (we avoid object creation)
814 812 if path in ac[3]: # checking the 'files' field.
815 813 # The file has been touched, check if the content is
816 814 # similar to the one we search for.
817 815 if fnode == mfl[ac[0]].readfast().get(path):
818 816 return a
819 817 # In theory, we should never get out of that loop without a result.
820 818 # But if the manifest uses a buggy file revision (not a child of the
821 819 # one it replaces) we could. Such a buggy situation will likely
822 820 # result in a crash somewhere else at some point.
823 821 return lkr
824 822
825 823 def introrev(self):
826 824 """return the rev of the changeset which introduced this file revision
827 825
829 827 This method is different from linkrev because it takes into account the
829 827 changeset the filectx was created from. It ensures the returned
830 828 revision is one of its ancestors. This prevents bugs from
831 829 'linkrev-shadowing' when a file revision is used by multiple
832 830 changesets.
833 831 """
834 832 lkr = self.linkrev()
835 833 attrs = vars(self)
836 834 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
837 835 if noctx or self.rev() == lkr:
838 836 return self.linkrev()
839 837 return self._adjustlinkrev(self.rev(), inclusive=True)
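# Illustrative sketch, not part of the diff above; assumes 'repo' is an open
# localrepository and that the path exists at tip. introrev() (above) always
# returns an ancestor of the changeset the filectx was created from, which
# plain linkrev() does not guarantee when a file revision is shared by
# several changesets ("linkrev shadowing").
fctx = repo[b'tip'][b'mercurial/context.py']
assert fctx.introrev() in repo.changelog.ancestors([fctx.rev()],
                                                   inclusive=True)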
840 838
841 839 def introfilectx(self):
842 840 """Return filectx having identical contents, but pointing to the
843 841 changeset revision where this filectx was introduced"""
844 842 introrev = self.introrev()
845 843 if self.rev() == introrev:
846 844 return self
847 845 return self.filectx(self.filenode(), changeid=introrev)
848 846
849 847 def _parentfilectx(self, path, fileid, filelog):
850 848 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
851 849 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
852 850 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
853 851 # If self is associated with a changeset (probably explicitly
854 852 # fed), ensure the created filectx is associated with a
855 853 # changeset that is an ancestor of self.changectx.
856 854 # This lets us later use _adjustlinkrev to get a correct link.
857 855 fctx._descendantrev = self.rev()
858 856 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
859 857 elif r'_descendantrev' in vars(self):
860 858 # Otherwise propagate _descendantrev if we have one associated.
861 859 fctx._descendantrev = self._descendantrev
862 860 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
863 861 return fctx
864 862
865 863 def parents(self):
866 864 _path = self._path
867 865 fl = self._filelog
868 866 parents = self._filelog.parents(self._filenode)
869 867 pl = [(_path, node, fl) for node in parents if node != nullid]
870 868
871 869 r = fl.renamed(self._filenode)
872 870 if r:
873 871 # - In the simple rename case, both parents are nullid and pl is empty.
874 872 # - In case of merge, only one of the parents is nullid and should
875 873 # be replaced with the rename information. This parent is -always-
876 874 # the first one.
877 875 #
878 876 # As nullid parents have already been filtered out by the list
879 877 # comprehension above, inserting at index 0 always replaces the
880 878 # first nullid parent with the rename information.
881 879 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
882 880
883 881 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
884 882
885 883 def p1(self):
886 884 return self.parents()[0]
887 885
888 886 def p2(self):
889 887 p = self.parents()
890 888 if len(p) == 2:
891 889 return p[1]
892 890 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
893 891
894 892 def annotate(self, follow=False, skiprevs=None, diffopts=None):
895 893 """Returns a list of annotateline objects for each line in the file
896 894
897 895 - line.fctx is the filectx of the node where that line was last changed
898 896 - line.lineno is the line number at the first appearance in the managed
899 897 file
900 898 - line.text is the data on that line (including newline character)
901 899 """
902 900 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
903 901
904 902 def parents(f):
905 903 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
906 904 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
907 905 # from the topmost introrev (= srcrev) down to p.linkrev() if it
908 906 # isn't an ancestor of the srcrev.
909 907 f._changeid
910 908 pl = f.parents()
911 909
912 910 # Don't return renamed parents if we aren't following.
913 911 if not follow:
914 912 pl = [p for p in pl if p.path() == f.path()]
915 913
916 914 # renamed filectx won't have a filelog yet, so set it
917 915 # from the cache to save time
918 916 for p in pl:
919 917 if not r'_filelog' in p.__dict__:
920 918 p._filelog = getlog(p.path())
921 919
922 920 return pl
923 921
924 922 # use linkrev to find the first changeset where self appeared
925 923 base = self.introfilectx()
926 924 if getattr(base, '_ancestrycontext', None) is None:
927 925 cl = self._repo.changelog
928 926 if base.rev() is None:
929 927 # wctx is not inclusive, but works because _ancestrycontext
930 928 # is used to test filelog revisions
931 929 ac = cl.ancestors([p.rev() for p in base.parents()],
932 930 inclusive=True)
933 931 else:
934 932 ac = cl.ancestors([base.rev()], inclusive=True)
935 933 base._ancestrycontext = ac
936 934
937 935 return dagop.annotate(base, parents, skiprevs=skiprevs,
938 936 diffopts=diffopts)
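# Illustrative sketch, not part of the diff above; assumes 'repo' is an open
# localrepository and that the path exists at tip. Consuming the annotateline
# objects described in the annotate() docstring:
fctx = repo[b'tip'][b'mercurial/context.py']
for line in fctx.annotate(follow=True):
    # line.fctx, line.lineno and line.text are the documented fields
    repo.ui.write(b'%d:%d: %s' % (line.fctx.rev(), line.lineno, line.text))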
939 937
940 938 def ancestors(self, followfirst=False):
941 939 visit = {}
942 940 c = self
943 941 if followfirst:
944 942 cut = 1
945 943 else:
946 944 cut = None
947 945
948 946 while True:
949 947 for parent in c.parents()[:cut]:
950 948 visit[(parent.linkrev(), parent.filenode())] = parent
951 949 if not visit:
952 950 break
953 951 c = visit.pop(max(visit))
954 952 yield c
955 953
956 954 def decodeddata(self):
957 955 """Returns `data()` after running repository decoding filters.
958 956
959 957 This is often equivalent to how the data would be expressed on disk.
960 958 """
961 959 return self._repo.wwritedata(self.path(), self.data())
962 960
963 961 class filectx(basefilectx):
964 962 """A filecontext object makes access to data related to a particular
965 963 filerevision convenient."""
966 964 def __init__(self, repo, path, changeid=None, fileid=None,
967 965 filelog=None, changectx=None):
968 966 """changeid can be a changeset revision, node, or tag.
969 967 fileid can be a file revision or node."""
970 968 self._repo = repo
971 969 self._path = path
972 970
973 971 assert (changeid is not None
974 972 or fileid is not None
975 973 or changectx is not None), \
976 974 ("bad args: changeid=%r, fileid=%r, changectx=%r"
977 975 % (changeid, fileid, changectx))
978 976
979 977 if filelog is not None:
980 978 self._filelog = filelog
981 979
982 980 if changeid is not None:
983 981 self._changeid = changeid
984 982 if changectx is not None:
985 983 self._changectx = changectx
986 984 if fileid is not None:
987 985 self._fileid = fileid
988 986
989 987 @propertycache
990 988 def _changectx(self):
991 989 try:
992 990 return changectx(self._repo, self._changeid)
993 991 except error.FilteredRepoLookupError:
994 992 # Linkrev may point to any revision in the repository. When the
995 993 # repository is filtered this may lead to `filectx` trying to build
996 994 # `changectx` for filtered revision. In such case we fallback to
997 995 # creating `changectx` on the unfiltered version of the repository.
998 996 # This fallback should not be an issue because `changectx` from
999 997 # `filectx` are not used in complex operations that care about
1000 998 # filtering.
1001 999 #
1002 1000 # This fallback is a cheap and dirty fix that prevents several
1003 1001 # crashes. It does not ensure the behavior is correct. However the
1004 1002 # behavior was not correct before filtering either, and "incorrect
1005 1003 # behavior" is seen as better than "crash".
1006 1004 #
1007 1005 # Linkrevs have several serious problems with filtering that are
1008 1006 # complicated to solve. Proper handling of the issue here should be
1009 1007 # considered once solving the linkrev issue is on the table.
1010 1008 return changectx(self._repo.unfiltered(), self._changeid)
1011 1009
1012 1010 def filectx(self, fileid, changeid=None):
1013 1011 '''opens an arbitrary revision of the file without
1014 1012 opening a new filelog'''
1015 1013 return filectx(self._repo, self._path, fileid=fileid,
1016 1014 filelog=self._filelog, changeid=changeid)
1017 1015
1018 1016 def rawdata(self):
1019 1017 return self._filelog.revision(self._filenode, raw=True)
1020 1018
1021 1019 def rawflags(self):
1022 1020 """low-level revlog flags"""
1023 1021 return self._filelog.flags(self._filerev)
1024 1022
1025 1023 def data(self):
1026 1024 try:
1027 1025 return self._filelog.read(self._filenode)
1028 1026 except error.CensoredNodeError:
1029 1027 if self._repo.ui.config("censor", "policy") == "ignore":
1030 1028 return ""
1031 1029 raise error.Abort(_("censored node: %s") % short(self._filenode),
1032 1030 hint=_("set censor.policy to ignore errors"))
1033 1031
1034 1032 def size(self):
1035 1033 return self._filelog.size(self._filerev)
1036 1034
1037 1035 @propertycache
1038 1036 def _copied(self):
1039 1037 """check if file was actually renamed in this changeset revision
1040 1038
1041 1039 If a rename is logged in the file revision, we report a copy for the
1042 1040 changeset only if the file revision's linkrev points back to the changeset
1043 1041 in question or both changeset parents contain different file revisions.
1044 1042 """
1045 1043
1046 1044 renamed = self._filelog.renamed(self._filenode)
1047 1045 if not renamed:
1048 1046 return renamed
1049 1047
1050 1048 if self.rev() == self.linkrev():
1051 1049 return renamed
1052 1050
1053 1051 name = self.path()
1054 1052 fnode = self._filenode
1055 1053 for p in self._changectx.parents():
1056 1054 try:
1057 1055 if fnode == p.filenode(name):
1058 1056 return None
1059 1057 except error.LookupError:
1060 1058 pass
1061 1059 return renamed
1062 1060
1063 1061 def children(self):
1064 1062 # hard for renames
1065 1063 c = self._filelog.children(self._filenode)
1066 1064 return [filectx(self._repo, self._path, fileid=x,
1067 1065 filelog=self._filelog) for x in c]
1068 1066
1069 1067 class committablectx(basectx):
1070 1068 """A committablectx object provides common functionality for a context that
1071 1069 wants the ability to commit, e.g. workingctx or memctx."""
1072 1070 def __init__(self, repo, text="", user=None, date=None, extra=None,
1073 1071 changes=None):
1074 1072 super(committablectx, self).__init__(repo)
1075 1073 self._rev = None
1076 1074 self._node = None
1077 1075 self._text = text
1078 1076 if date:
1079 1077 self._date = dateutil.parsedate(date)
1080 1078 if user:
1081 1079 self._user = user
1082 1080 if changes:
1083 1081 self._status = changes
1084 1082
1085 1083 self._extra = {}
1086 1084 if extra:
1087 1085 self._extra = extra.copy()
1088 1086 if 'branch' not in self._extra:
1089 1087 try:
1090 1088 branch = encoding.fromlocal(self._repo.dirstate.branch())
1091 1089 except UnicodeDecodeError:
1092 1090 raise error.Abort(_('branch name not in UTF-8!'))
1093 1091 self._extra['branch'] = branch
1094 1092 if self._extra['branch'] == '':
1095 1093 self._extra['branch'] = 'default'
1096 1094
1097 1095 def __bytes__(self):
1098 1096 return bytes(self._parents[0]) + "+"
1099 1097
1100 1098 __str__ = encoding.strmethod(__bytes__)
1101 1099
1102 1100 def __nonzero__(self):
1103 1101 return True
1104 1102
1105 1103 __bool__ = __nonzero__
1106 1104
1107 1105 def _buildflagfunc(self):
1108 1106 # Create a fallback function for getting file flags when the
1109 1107 # filesystem doesn't support them
1110 1108
1111 1109 copiesget = self._repo.dirstate.copies().get
1112 1110 parents = self.parents()
1113 1111 if len(parents) < 2:
1114 1112 # when we have one parent, it's easy: copy from parent
1115 1113 man = parents[0].manifest()
1116 1114 def func(f):
1117 1115 f = copiesget(f, f)
1118 1116 return man.flags(f)
1119 1117 else:
1120 1118 # merges are tricky: we try to reconstruct the unstored
1121 1119 # result from the merge (issue1802)
1122 1120 p1, p2 = parents
1123 1121 pa = p1.ancestor(p2)
1124 1122 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1125 1123
1126 1124 def func(f):
1127 1125 f = copiesget(f, f) # may be wrong for merges with copies
1128 1126 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1129 1127 if fl1 == fl2:
1130 1128 return fl1
1131 1129 if fl1 == fla:
1132 1130 return fl2
1133 1131 if fl2 == fla:
1134 1132 return fl1
1135 1133 return '' # punt for conflicts
1136 1134
1137 1135 return func
1138 1136
1139 1137 @propertycache
1140 1138 def _flagfunc(self):
1141 1139 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1142 1140
1143 1141 @propertycache
1144 1142 def _status(self):
1145 1143 return self._repo.status()
1146 1144
1147 1145 @propertycache
1148 1146 def _user(self):
1149 1147 return self._repo.ui.username()
1150 1148
1151 1149 @propertycache
1152 1150 def _date(self):
1153 1151 ui = self._repo.ui
1154 1152 date = ui.configdate('devel', 'default-date')
1155 1153 if date is None:
1156 1154 date = dateutil.makedate()
1157 1155 return date
1158 1156
1159 1157 def subrev(self, subpath):
1160 1158 return None
1161 1159
1162 1160 def manifestnode(self):
1163 1161 return None
1164 1162 def user(self):
1165 1163 return self._user or self._repo.ui.username()
1166 1164 def date(self):
1167 1165 return self._date
1168 1166 def description(self):
1169 1167 return self._text
1170 1168 def files(self):
1171 1169 return sorted(self._status.modified + self._status.added +
1172 1170 self._status.removed)
1173 1171
1174 1172 def modified(self):
1175 1173 return self._status.modified
1176 1174 def added(self):
1177 1175 return self._status.added
1178 1176 def removed(self):
1179 1177 return self._status.removed
1180 1178 def deleted(self):
1181 1179 return self._status.deleted
1182 1180 def branch(self):
1183 1181 return encoding.tolocal(self._extra['branch'])
1184 1182 def closesbranch(self):
1185 1183 return 'close' in self._extra
1186 1184 def extra(self):
1187 1185 return self._extra
1188 1186
1189 1187 def isinmemory(self):
1190 1188 return False
1191 1189
1192 1190 def tags(self):
1193 1191 return []
1194 1192
1195 1193 def bookmarks(self):
1196 1194 b = []
1197 1195 for p in self.parents():
1198 1196 b.extend(p.bookmarks())
1199 1197 return b
1200 1198
1201 1199 def phase(self):
1202 1200 phase = phases.draft # default phase to draft
1203 1201 for p in self.parents():
1204 1202 phase = max(phase, p.phase())
1205 1203 return phase
1206 1204
1207 1205 def hidden(self):
1208 1206 return False
1209 1207
1210 1208 def children(self):
1211 1209 return []
1212 1210
1213 1211 def flags(self, path):
1214 1212 if r'_manifest' in self.__dict__:
1215 1213 try:
1216 1214 return self._manifest.flags(path)
1217 1215 except KeyError:
1218 1216 return ''
1219 1217
1220 1218 try:
1221 1219 return self._flagfunc(path)
1222 1220 except OSError:
1223 1221 return ''
1224 1222
1225 1223 def ancestor(self, c2):
1226 1224 """return the "best" ancestor context of self and c2"""
1227 1225 return self._parents[0].ancestor(c2) # punt on two parents for now
1228 1226
1229 1227 def walk(self, match):
1230 1228 '''Generates matching file names.'''
1231 1229 return sorted(self._repo.dirstate.walk(match,
1232 1230 subrepos=sorted(self.substate),
1233 1231 unknown=True, ignored=False))
1234 1232
1235 1233 def matches(self, match):
1236 1234 ds = self._repo.dirstate
1237 1235 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1238 1236
1239 1237 def ancestors(self):
1240 1238 for p in self._parents:
1241 1239 yield p
1242 1240 for a in self._repo.changelog.ancestors(
1243 1241 [p.rev() for p in self._parents]):
1244 1242 yield changectx(self._repo, a)
1245 1243
1246 1244 def markcommitted(self, node):
1247 1245 """Perform post-commit cleanup necessary after committing this ctx
1248 1246
1249 1247 Specifically, this updates backing stores this working context
1250 1248 wraps to reflect the fact that the changes reflected by this
1251 1249 workingctx have been committed. For example, it marks
1252 1250 modified and added files as normal in the dirstate.
1253 1251
1254 1252 """
1255 1253
1256 1254 with self._repo.dirstate.parentchange():
1257 1255 for f in self.modified() + self.added():
1258 1256 self._repo.dirstate.normal(f)
1259 1257 for f in self.removed():
1260 1258 self._repo.dirstate.drop(f)
1261 1259 self._repo.dirstate.setparents(node)
1262 1260
1263 1261 # write changes out explicitly, because nesting wlock at
1264 1262 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1265 1263 # from immediately doing so for subsequent changing files
1266 1264 self._repo.dirstate.write(self._repo.currenttransaction())
1267 1265
1268 1266 def dirty(self, missing=False, merge=True, branch=True):
1269 1267 return False
1270 1268
1271 1269 class workingctx(committablectx):
1272 1270 """A workingctx object makes access to data related to
1273 1271 the current working directory convenient.
1274 1272 date - any valid date string or (unixtime, offset), or None.
1275 1273 user - username string, or None.
1276 1274 extra - a dictionary of extra values, or None.
1277 1275 changes - a list of file lists as returned by localrepo.status()
1278 1276 or None to use the repository status.
1279 1277 """
1280 1278 def __init__(self, repo, text="", user=None, date=None, extra=None,
1281 1279 changes=None):
1282 1280 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1283 1281
1284 1282 def __iter__(self):
1285 1283 d = self._repo.dirstate
1286 1284 for f in d:
1287 1285 if d[f] != 'r':
1288 1286 yield f
1289 1287
1290 1288 def __contains__(self, key):
1291 1289 return self._repo.dirstate[key] not in "?r"
1292 1290
1293 1291 def hex(self):
1294 1292 return hex(wdirid)
1295 1293
1296 1294 @propertycache
1297 1295 def _parents(self):
1298 1296 p = self._repo.dirstate.parents()
1299 1297 if p[1] == nullid:
1300 1298 p = p[:-1]
1301 1299 return [changectx(self._repo, x) for x in p]
1302 1300
1303 1301 def _fileinfo(self, path):
1304 1302 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1305 1303 self._manifest
1306 1304 return super(workingctx, self)._fileinfo(path)
1307 1305
1308 1306 def filectx(self, path, filelog=None):
1309 1307 """get a file context from the working directory"""
1310 1308 return workingfilectx(self._repo, path, workingctx=self,
1311 1309 filelog=filelog)
1312 1310
1313 1311 def dirty(self, missing=False, merge=True, branch=True):
1314 1312 "check whether a working directory is modified"
1315 1313 # check subrepos first
1316 1314 for s in sorted(self.substate):
1317 1315 if self.sub(s).dirty(missing=missing):
1318 1316 return True
1319 1317 # check current working dir
1320 1318 return ((merge and self.p2()) or
1321 1319 (branch and self.branch() != self.p1().branch()) or
1322 1320 self.modified() or self.added() or self.removed() or
1323 1321 (missing and self.deleted()))
1324 1322
1325 1323 def add(self, list, prefix=""):
1326 1324 with self._repo.wlock():
1327 1325 ui, ds = self._repo.ui, self._repo.dirstate
1328 1326 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1329 1327 rejected = []
1330 1328 lstat = self._repo.wvfs.lstat
1331 1329 for f in list:
1332 1330 # ds.pathto() returns an absolute file when this is invoked from
1333 1331 # the keyword extension. That gets flagged as non-portable on
1334 1332 # Windows, since it contains the drive letter and colon.
1335 1333 scmutil.checkportable(ui, os.path.join(prefix, f))
1336 1334 try:
1337 1335 st = lstat(f)
1338 1336 except OSError:
1339 1337 ui.warn(_("%s does not exist!\n") % uipath(f))
1340 1338 rejected.append(f)
1341 1339 continue
1342 1340 if st.st_size > 10000000:
1343 1341 ui.warn(_("%s: up to %d MB of RAM may be required "
1344 1342 "to manage this file\n"
1345 1343 "(use 'hg revert %s' to cancel the "
1346 1344 "pending addition)\n")
1347 1345 % (f, 3 * st.st_size // 1000000, uipath(f)))
1348 1346 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1349 1347 ui.warn(_("%s not added: only files and symlinks "
1350 1348 "supported currently\n") % uipath(f))
1351 1349 rejected.append(f)
1352 1350 elif ds[f] in 'amn':
1353 1351 ui.warn(_("%s already tracked!\n") % uipath(f))
1354 1352 elif ds[f] == 'r':
1355 1353 ds.normallookup(f)
1356 1354 else:
1357 1355 ds.add(f)
1358 1356 return rejected
1359 1357
1360 1358 def forget(self, files, prefix=""):
1361 1359 with self._repo.wlock():
1362 1360 ds = self._repo.dirstate
1363 1361 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1364 1362 rejected = []
1365 1363 for f in files:
1366 1364 if f not in self._repo.dirstate:
1367 1365 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1368 1366 rejected.append(f)
1369 1367 elif self._repo.dirstate[f] != 'a':
1370 1368 self._repo.dirstate.remove(f)
1371 1369 else:
1372 1370 self._repo.dirstate.drop(f)
1373 1371 return rejected
1374 1372
1375 1373 def undelete(self, list):
1376 1374 pctxs = self.parents()
1377 1375 with self._repo.wlock():
1378 1376 ds = self._repo.dirstate
1379 1377 for f in list:
1380 1378 if self._repo.dirstate[f] != 'r':
1381 1379 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1382 1380 else:
1383 1381 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1384 1382 t = fctx.data()
1385 1383 self._repo.wwrite(f, t, fctx.flags())
1386 1384 self._repo.dirstate.normal(f)
1387 1385
1388 1386 def copy(self, source, dest):
1389 1387 try:
1390 1388 st = self._repo.wvfs.lstat(dest)
1391 1389 except OSError as err:
1392 1390 if err.errno != errno.ENOENT:
1393 1391 raise
1394 1392 self._repo.ui.warn(_("%s does not exist!\n")
1395 1393 % self._repo.dirstate.pathto(dest))
1396 1394 return
1397 1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1398 1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1399 1397 "symbolic link\n")
1400 1398 % self._repo.dirstate.pathto(dest))
1401 1399 else:
1402 1400 with self._repo.wlock():
1403 1401 if self._repo.dirstate[dest] in '?':
1404 1402 self._repo.dirstate.add(dest)
1405 1403 elif self._repo.dirstate[dest] in 'r':
1406 1404 self._repo.dirstate.normallookup(dest)
1407 1405 self._repo.dirstate.copy(source, dest)
1408 1406
1409 1407 def match(self, pats=None, include=None, exclude=None, default='glob',
1410 1408 listsubrepos=False, badfn=None):
1411 1409 r = self._repo
1412 1410
1413 1411 # Only a case insensitive filesystem needs magic to translate user input
1414 1412 # to actual case in the filesystem.
1415 1413 icasefs = not util.fscasesensitive(r.root)
1416 1414 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1417 1415 default, auditor=r.auditor, ctx=self,
1418 1416 listsubrepos=listsubrepos, badfn=badfn,
1419 1417 icasefs=icasefs)
1420 1418
1421 1419 def _filtersuspectsymlink(self, files):
1422 1420 if not files or self._repo.dirstate._checklink:
1423 1421 return files
1424 1422
1425 1423 # Symlink placeholders may get non-symlink-like contents
1426 1424 # via user error or dereferencing by NFS or Samba servers,
1427 1425 # so we filter out any placeholders that don't look like a
1428 1426 # symlink
1429 1427 sane = []
1430 1428 for f in files:
1431 1429 if self.flags(f) == 'l':
1432 1430 d = self[f].data()
1433 1431 if (d == '' or len(d) >= 1024 or '\n' in d
1434 1432 or stringutil.binary(d)):
1435 1433 self._repo.ui.debug('ignoring suspect symlink placeholder'
1436 1434 ' "%s"\n' % f)
1437 1435 continue
1438 1436 sane.append(f)
1439 1437 return sane
1440 1438
1441 1439 def _checklookup(self, files):
1442 1440 # check for any possibly clean files
1443 1441 if not files:
1444 1442 return [], [], []
1445 1443
1446 1444 modified = []
1447 1445 deleted = []
1448 1446 fixup = []
1449 1447 pctx = self._parents[0]
1450 1448 # do a full compare of any files that might have changed
1451 1449 for f in sorted(files):
1452 1450 try:
1453 1451 # This will return True for a file that got replaced by a
1454 1452 # directory in the interim, but fixing that is pretty hard.
1455 1453 if (f not in pctx or self.flags(f) != pctx.flags(f)
1456 1454 or pctx[f].cmp(self[f])):
1457 1455 modified.append(f)
1458 1456 else:
1459 1457 fixup.append(f)
1460 1458 except (IOError, OSError):
1461 1459 # A file became inaccessible in between? Mark it as deleted,
1462 1460 # matching dirstate behavior (issue5584).
1463 1461 # The dirstate has more complex behavior around whether a
1464 1462 # missing file matches a directory, etc, but we don't need to
1465 1463 # bother with that: if f has made it to this point, we're sure
1466 1464 # it's in the dirstate.
1467 1465 deleted.append(f)
1468 1466
1469 1467 return modified, deleted, fixup
1470 1468
1471 1469 def _poststatusfixup(self, status, fixup):
1472 1470 """update dirstate for files that are actually clean"""
1473 1471 poststatus = self._repo.postdsstatus()
1474 1472 if fixup or poststatus:
1475 1473 try:
1476 1474 oldid = self._repo.dirstate.identity()
1477 1475
1478 1476 # updating the dirstate is optional
1479 1477 # so we don't wait on the lock
1480 1478 # wlock can invalidate the dirstate, so cache normal _after_
1481 1479 # taking the lock
1482 1480 with self._repo.wlock(False):
1483 1481 if self._repo.dirstate.identity() == oldid:
1484 1482 if fixup:
1485 1483 normal = self._repo.dirstate.normal
1486 1484 for f in fixup:
1487 1485 normal(f)
1488 1486 # write changes out explicitly, because nesting
1489 1487 # wlock at runtime may prevent 'wlock.release()'
1490 1488 # after this block from doing so for subsequent
1491 1489 # changing files
1492 1490 tr = self._repo.currenttransaction()
1493 1491 self._repo.dirstate.write(tr)
1494 1492
1495 1493 if poststatus:
1496 1494 for ps in poststatus:
1497 1495 ps(self, status)
1498 1496 else:
1499 1497 # in this case, writing changes out breaks
1500 1498 # consistency, because .hg/dirstate was
1501 1499 # already changed simultaneously after last
1502 1500 # caching (see also issue5584 for detail)
1503 1501 self._repo.ui.debug('skip updating dirstate: '
1504 1502 'identity mismatch\n')
1505 1503 except error.LockError:
1506 1504 pass
1507 1505 finally:
1508 1506 # Even if the wlock couldn't be grabbed, clear out the list.
1509 1507 self._repo.clearpostdsstatus()
1510 1508
1511 1509 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1512 1510 '''Gets the status from the dirstate -- internal use only.'''
1513 1511 subrepos = []
1514 1512 if '.hgsub' in self:
1515 1513 subrepos = sorted(self.substate)
1516 1514 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1517 1515 clean=clean, unknown=unknown)
1518 1516
1519 1517 # check for any possibly clean files
1520 1518 fixup = []
1521 1519 if cmp:
1522 1520 modified2, deleted2, fixup = self._checklookup(cmp)
1523 1521 s.modified.extend(modified2)
1524 1522 s.deleted.extend(deleted2)
1525 1523
1526 1524 if fixup and clean:
1527 1525 s.clean.extend(fixup)
1528 1526
1529 1527 self._poststatusfixup(s, fixup)
1530 1528
1531 1529 if match.always():
1532 1530 # cache for performance
1533 1531 if s.unknown or s.ignored or s.clean:
1534 1532 # "_status" is cached with list*=False in the normal route
1535 1533 self._status = scmutil.status(s.modified, s.added, s.removed,
1536 1534 s.deleted, [], [], [])
1537 1535 else:
1538 1536 self._status = s
1539 1537
1540 1538 return s
1541 1539
1542 1540 @propertycache
1543 1541 def _manifest(self):
1544 1542 """generate a manifest corresponding to the values in self._status
1545 1543
1546 1544 This reuses the file nodeids from the parent, but we use special node
1547 1545 identifiers for added and modified files. This is used by manifest
1548 1546 merge to see that files are different and by update logic to avoid
1549 1547 deleting newly added files.
1550 1548 """
1551 1549 return self._buildstatusmanifest(self._status)
1552 1550
1553 1551 def _buildstatusmanifest(self, status):
1554 1552 """Builds a manifest that includes the given status results."""
1555 1553 parents = self.parents()
1556 1554
1557 1555 man = parents[0].manifest().copy()
1558 1556
1559 1557 ff = self._flagfunc
1560 1558 for i, l in ((addednodeid, status.added),
1561 1559 (modifiednodeid, status.modified)):
1562 1560 for f in l:
1563 1561 man[f] = i
1564 1562 try:
1565 1563 man.setflag(f, ff(f))
1566 1564 except OSError:
1567 1565 pass
1568 1566
1569 1567 for f in status.deleted + status.removed:
1570 1568 if f in man:
1571 1569 del man[f]
1572 1570
1573 1571 return man
1574 1572
1575 1573 def _buildstatus(self, other, s, match, listignored, listclean,
1576 1574 listunknown):
1577 1575 """build a status with respect to another context
1578 1576
1579 1577 This includes logic for maintaining the fast path of status when
1580 1578 comparing the working directory against its parent, which is to skip
1581 1579 building a new manifest if self (working directory) is being compared
1582 1580 against its parent (repo['.']).
1583 1581 """
1584 1582 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1585 1583 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1586 1584 # might have accidentally ended up with the entire contents of the file
1587 1585 # they are supposed to be linking to.
1588 1586 s.modified[:] = self._filtersuspectsymlink(s.modified)
1589 1587 if other != self._repo['.']:
1590 1588 s = super(workingctx, self)._buildstatus(other, s, match,
1591 1589 listignored, listclean,
1592 1590 listunknown)
1593 1591 return s
1594 1592
1595 1593 def _matchstatus(self, other, match):
1596 1594 """override the match method with a filter for directory patterns
1597 1595
1598 1596 We use inheritance to customize the match.bad method only in cases of
1599 1597 workingctx since it belongs only to the working directory when
1600 1598 comparing against the parent changeset.
1601 1599
1602 1600 If we aren't comparing against the working directory's parent, then we
1603 1601 just use the default match object sent to us.
1604 1602 """
1605 1603 if other != self._repo['.']:
1606 1604 def bad(f, msg):
1607 1605 # 'f' may be a directory pattern from 'match.files()',
1608 1606                 # so 'f not in other' is not enough
1609 1607 if f not in other and not other.hasdir(f):
1610 1608 self._repo.ui.warn('%s: %s\n' %
1611 1609 (self._repo.dirstate.pathto(f), msg))
1612 1610 match.bad = bad
1613 1611 return match
1614 1612
1615 1613 def markcommitted(self, node):
1616 1614 super(workingctx, self).markcommitted(node)
1617 1615
1618 1616 sparse.aftercommit(self._repo, node)
1619 1617
1620 1618 class committablefilectx(basefilectx):
1621 1619 """A committablefilectx provides common functionality for a file context
1622 1620 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1623 1621 def __init__(self, repo, path, filelog=None, ctx=None):
1624 1622 self._repo = repo
1625 1623 self._path = path
1626 1624 self._changeid = None
1627 1625 self._filerev = self._filenode = None
1628 1626
1629 1627 if filelog is not None:
1630 1628 self._filelog = filelog
1631 1629 if ctx:
1632 1630 self._changectx = ctx
1633 1631
1634 1632 def __nonzero__(self):
1635 1633 return True
1636 1634
1637 1635 __bool__ = __nonzero__
1638 1636
1639 1637 def linkrev(self):
1640 1638 # linked to self._changectx no matter if file is modified or not
1641 1639 return self.rev()
1642 1640
1643 1641 def parents(self):
1644 1642 '''return parent filectxs, following copies if necessary'''
1645 1643 def filenode(ctx, path):
1646 1644 return ctx._manifest.get(path, nullid)
1647 1645
1648 1646 path = self._path
1649 1647 fl = self._filelog
1650 1648 pcl = self._changectx._parents
1651 1649 renamed = self.renamed()
1652 1650
1653 1651 if renamed:
1654 1652 pl = [renamed + (None,)]
1655 1653 else:
1656 1654 pl = [(path, filenode(pcl[0], path), fl)]
1657 1655
1658 1656 for pc in pcl[1:]:
1659 1657 pl.append((path, filenode(pc, path), fl))
1660 1658
1661 1659 return [self._parentfilectx(p, fileid=n, filelog=l)
1662 1660 for p, n, l in pl if n != nullid]
1663 1661
1664 1662 def children(self):
1665 1663 return []
1666 1664
1667 1665 class workingfilectx(committablefilectx):
1668 1666 """A workingfilectx object makes access to data related to a particular
1669 1667 file in the working directory convenient."""
1670 1668 def __init__(self, repo, path, filelog=None, workingctx=None):
1671 1669 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1672 1670
1673 1671 @propertycache
1674 1672 def _changectx(self):
1675 1673 return workingctx(self._repo)
1676 1674
1677 1675 def data(self):
1678 1676 return self._repo.wread(self._path)
1679 1677 def renamed(self):
1680 1678 rp = self._repo.dirstate.copied(self._path)
1681 1679 if not rp:
1682 1680 return None
1683 1681 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1684 1682
1685 1683 def size(self):
1686 1684 return self._repo.wvfs.lstat(self._path).st_size
1687 1685 def date(self):
1688 1686 t, tz = self._changectx.date()
1689 1687 try:
1690 1688 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1691 1689 except OSError as err:
1692 1690 if err.errno != errno.ENOENT:
1693 1691 raise
1694 1692 return (t, tz)
1695 1693
1696 1694 def exists(self):
1697 1695 return self._repo.wvfs.exists(self._path)
1698 1696
1699 1697 def lexists(self):
1700 1698 return self._repo.wvfs.lexists(self._path)
1701 1699
1702 1700 def audit(self):
1703 1701 return self._repo.wvfs.audit(self._path)
1704 1702
1705 1703 def cmp(self, fctx):
1706 1704 """compare with other file context
1707 1705
1708 1706 returns True if different than fctx.
1709 1707 """
1710 1708 # fctx should be a filectx (not a workingfilectx)
1711 1709 # invert comparison to reuse the same code path
1712 1710 return fctx.cmp(self)
1713 1711
1714 1712 def remove(self, ignoremissing=False):
1715 1713 """wraps unlink for a repo's working directory"""
1716 1714 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1717 1715 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1718 1716 rmdir=rmdir)
1719 1717
1720 1718 def write(self, data, flags, backgroundclose=False, **kwargs):
1721 1719 """wraps repo.wwrite"""
1722 1720 self._repo.wwrite(self._path, data, flags,
1723 1721 backgroundclose=backgroundclose,
1724 1722 **kwargs)
1725 1723
1726 1724 def markcopied(self, src):
1727 1725 """marks this file a copy of `src`"""
1728 1726 if self._repo.dirstate[self._path] in "nma":
1729 1727 self._repo.dirstate.copy(src, self._path)
1730 1728
1731 1729 def clearunknown(self):
1732 1730 """Removes conflicting items in the working directory so that
1733 1731 ``write()`` can be called successfully.
1734 1732 """
1735 1733 wvfs = self._repo.wvfs
1736 1734 f = self._path
1737 1735 wvfs.audit(f)
1738 1736 if wvfs.isdir(f) and not wvfs.islink(f):
1739 1737 wvfs.rmtree(f, forcibly=True)
1740 1738 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1741 1739 for p in reversed(list(util.finddirs(f))):
1742 1740 if wvfs.isfileorlink(p):
1743 1741 wvfs.unlink(p)
1744 1742 break
1745 1743
1746 1744 def setflags(self, l, x):
1747 1745 self._repo.wvfs.setflags(self._path, l, x)
1748 1746
1749 1747 class overlayworkingctx(committablectx):
1750 1748 """Wraps another mutable context with a write-back cache that can be
1751 1749 converted into a commit context.
1752 1750
1753 1751 self._cache[path] maps to a dict with keys: {
1754 1752 'exists': bool?
1755 1753 'date': date?
1756 1754 'data': str?
1757 1755 'flags': str?
1758 1756 'copied': str? (path or None)
1759 1757 }
1760 1758     If `exists` is True, `flags` and `date` must be non-None. If `exists` is
1761 1759     `False`, the file was deleted.
1762 1760 """
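    # A minimal illustrative sketch (hypothetical instance name ``wctx``, not
    # executed): after ``wctx.write('foo', b'new contents', flags='x')`` the
    # cache entry would look roughly like:
    #   self._cache['foo'] = {
    #       'exists': True,
    #       'data': b'new contents',
    #       'date': dateutil.makedate(),
    #       'flags': 'x',
    #       'copied': None,
    #   }
    # while ``wctx.remove('foo')`` would record {'exists': False, ...} instead.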
1763 1761
1764 1762 def __init__(self, repo):
1765 1763 super(overlayworkingctx, self).__init__(repo)
1766 1764 self.clean()
1767 1765
1768 1766 def setbase(self, wrappedctx):
1769 1767 self._wrappedctx = wrappedctx
1770 1768 self._parents = [wrappedctx]
1771 1769 # Drop old manifest cache as it is now out of date.
1772 1770 # This is necessary when, e.g., rebasing several nodes with one
1773 1771 # ``overlayworkingctx`` (e.g. with --collapse).
1774 1772 util.clearcachedproperty(self, '_manifest')
1775 1773
1776 1774 def data(self, path):
1777 1775 if self.isdirty(path):
1778 1776 if self._cache[path]['exists']:
1779 1777                 if self._cache[path]['data'] is not None:
1780 1778 return self._cache[path]['data']
1781 1779 else:
1782 1780 # Must fallback here, too, because we only set flags.
1783 1781 return self._wrappedctx[path].data()
1784 1782 else:
1785 1783 raise error.ProgrammingError("No such file or directory: %s" %
1786 1784 path)
1787 1785 else:
1788 1786 return self._wrappedctx[path].data()
1789 1787
1790 1788 @propertycache
1791 1789 def _manifest(self):
1792 1790 parents = self.parents()
1793 1791 man = parents[0].manifest().copy()
1794 1792
1795 1793 flag = self._flagfunc
1796 1794 for path in self.added():
1797 1795 man[path] = addednodeid
1798 1796 man.setflag(path, flag(path))
1799 1797 for path in self.modified():
1800 1798 man[path] = modifiednodeid
1801 1799 man.setflag(path, flag(path))
1802 1800 for path in self.removed():
1803 1801 del man[path]
1804 1802 return man
1805 1803
1806 1804 @propertycache
1807 1805 def _flagfunc(self):
1808 1806 def f(path):
1809 1807 return self._cache[path]['flags']
1810 1808 return f
1811 1809
1812 1810 def files(self):
1813 1811 return sorted(self.added() + self.modified() + self.removed())
1814 1812
1815 1813 def modified(self):
1816 1814 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1817 1815 self._existsinparent(f)]
1818 1816
1819 1817 def added(self):
1820 1818 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1821 1819 not self._existsinparent(f)]
1822 1820
1823 1821 def removed(self):
1824 1822 return [f for f in self._cache.keys() if
1825 1823 not self._cache[f]['exists'] and self._existsinparent(f)]
1826 1824
1827 1825 def isinmemory(self):
1828 1826 return True
1829 1827
1830 1828 def filedate(self, path):
1831 1829 if self.isdirty(path):
1832 1830 return self._cache[path]['date']
1833 1831 else:
1834 1832 return self._wrappedctx[path].date()
1835 1833
1836 1834 def markcopied(self, path, origin):
1837 1835 if self.isdirty(path):
1838 1836 self._cache[path]['copied'] = origin
1839 1837 else:
1840 1838 raise error.ProgrammingError('markcopied() called on clean context')
1841 1839
1842 1840 def copydata(self, path):
1843 1841 if self.isdirty(path):
1844 1842 return self._cache[path]['copied']
1845 1843 else:
1846 1844 raise error.ProgrammingError('copydata() called on clean context')
1847 1845
1848 1846 def flags(self, path):
1849 1847 if self.isdirty(path):
1850 1848 if self._cache[path]['exists']:
1851 1849 return self._cache[path]['flags']
1852 1850 else:
1853 1851 raise error.ProgrammingError("No such file or directory: %s" %
1854 1852                                              path)
1855 1853 else:
1856 1854 return self._wrappedctx[path].flags()
1857 1855
1858 1856 def _existsinparent(self, path):
1859 1857 try:
1860 1858             # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1861 1859 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1862 1860 # with an ``exists()`` function.
1863 1861 self._wrappedctx[path]
1864 1862 return True
1865 1863 except error.ManifestLookupError:
1866 1864 return False
1867 1865
1868 1866 def _auditconflicts(self, path):
1869 1867 """Replicates conflict checks done by wvfs.write().
1870 1868
1871 1869 Since we never write to the filesystem and never call `applyupdates` in
1872 1870         IMM, we'd never otherwise check that a path is actually writable --
1873 1871         e.g. when it adds `a/foo` while `a` is a file in the other commit.
1874 1872 """
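        # Illustrative sketch (hypothetical paths): with p1 tracking a file
        # 'a', ``self.write('a/foo', ...)`` must abort; conversely, with p1
        # tracking 'a/foo', ``self.write('a', ...)`` must abort because 'a/'
        # is a directory in p1. Both directions are checked below.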
1875 1873 def fail(path, component):
1876 1874 # p1() is the base and we're receiving "writes" for p2()'s
1877 1875 # files.
1878 1876 if 'l' in self.p1()[component].flags():
1879 1877 raise error.Abort("error: %s conflicts with symlink %s "
1880 1878 "in %s." % (path, component,
1881 1879 self.p1().rev()))
1882 1880 else:
1883 1881 raise error.Abort("error: '%s' conflicts with file '%s' in "
1884 1882 "%s." % (path, component,
1885 1883 self.p1().rev()))
1886 1884
1887 1885 # Test that each new directory to be created to write this path from p2
1888 1886 # is not a file in p1.
1889 1887 components = path.split('/')
1890 1888         for i in pycompat.xrange(len(components)):
1891 1889 component = "/".join(components[0:i])
1892 1890 if component in self.p1():
1893 1891 fail(path, component)
1894 1892
1895 1893 # Test the other direction -- that this path from p2 isn't a directory
1896 1894         # in p1 (test that p1 doesn't have any paths matching `path/*`).
1897 1895 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1898 1896 matches = self.p1().manifest().matches(match)
1899 1897 if len(matches) > 0:
1900 1898 if len(matches) == 1 and matches.keys()[0] == path:
1901 1899 return
1902 1900 raise error.Abort("error: file '%s' cannot be written because "
1903 1901 " '%s/' is a folder in %s (containing %d "
1904 1902 "entries: %s)"
1905 1903 % (path, path, self.p1(), len(matches),
1906 1904 ', '.join(matches.keys())))
1907 1905
1908 1906 def write(self, path, data, flags='', **kwargs):
1909 1907 if data is None:
1910 1908 raise error.ProgrammingError("data must be non-None")
1911 1909 self._auditconflicts(path)
1912 1910 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1913 1911 flags=flags)
1914 1912
1915 1913 def setflags(self, path, l, x):
1916 1914 self._markdirty(path, exists=True, date=dateutil.makedate(),
1917 1915 flags=(l and 'l' or '') + (x and 'x' or ''))
1918 1916
1919 1917 def remove(self, path):
1920 1918 self._markdirty(path, exists=False)
1921 1919
1922 1920 def exists(self, path):
1923 1921 """exists behaves like `lexists`, but needs to follow symlinks and
1924 1922 return False if they are broken.
1925 1923 """
1926 1924 if self.isdirty(path):
1927 1925 # If this path exists and is a symlink, "follow" it by calling
1928 1926 # exists on the destination path.
1929 1927 if (self._cache[path]['exists'] and
1930 1928 'l' in self._cache[path]['flags']):
1931 1929 return self.exists(self._cache[path]['data'].strip())
1932 1930 else:
1933 1931 return self._cache[path]['exists']
1934 1932
1935 1933 return self._existsinparent(path)
1936 1934
1937 1935 def lexists(self, path):
1938 1936 """lexists returns True if the path exists"""
1939 1937 if self.isdirty(path):
1940 1938 return self._cache[path]['exists']
1941 1939
1942 1940 return self._existsinparent(path)
1943 1941
1944 1942 def size(self, path):
1945 1943 if self.isdirty(path):
1946 1944 if self._cache[path]['exists']:
1947 1945 return len(self._cache[path]['data'])
1948 1946 else:
1949 1947 raise error.ProgrammingError("No such file or directory: %s" %
1950 1948                                              path)
1951 1949 return self._wrappedctx[path].size()
1952 1950
1953 1951 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1954 1952 user=None, editor=None):
1955 1953 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1956 1954 committed.
1957 1955
1958 1956 ``text`` is the commit message.
1959 1957 ``parents`` (optional) are rev numbers.
1960 1958 """
1961 1959         # Default parents to the wrapped context's if not passed.
1962 1960 if parents is None:
1963 1961 parents = self._wrappedctx.parents()
1964 1962 if len(parents) == 1:
1965 1963 parents = (parents[0], None)
1966 1964
1967 1965 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1968 1966 if parents[1] is None:
1969 1967 parents = (self._repo[parents[0]], None)
1970 1968 else:
1971 1969 parents = (self._repo[parents[0]], self._repo[parents[1]])
1972 1970
1973 1971 files = self._cache.keys()
1974 1972 def getfile(repo, memctx, path):
1975 1973 if self._cache[path]['exists']:
1976 1974 return memfilectx(repo, memctx, path,
1977 1975 self._cache[path]['data'],
1978 1976 'l' in self._cache[path]['flags'],
1979 1977 'x' in self._cache[path]['flags'],
1980 1978 self._cache[path]['copied'])
1981 1979 else:
1982 1980 # Returning None, but including the path in `files`, is
1983 1981 # necessary for memctx to register a deletion.
1984 1982 return None
1985 1983 return memctx(self._repo, parents, text, files, getfile, date=date,
1986 1984 extra=extra, user=user, branch=branch, editor=editor)
1987 1985
1988 1986 def isdirty(self, path):
1989 1987 return path in self._cache
1990 1988
1991 1989 def isempty(self):
1992 1990 # We need to discard any keys that are actually clean before the empty
1993 1991 # commit check.
1994 1992 self._compact()
1995 1993 return len(self._cache) == 0
1996 1994
1997 1995 def clean(self):
1998 1996 self._cache = {}
1999 1997
2000 1998 def _compact(self):
2001 1999 """Removes keys from the cache that are actually clean, by comparing
2002 2000 them with the underlying context.
2003 2001
2004 2002         Such clean entries can appear during the merge process, e.g. when
2005 2003         passing --tool :local to resolve a conflict.
2006 2004 """
2007 2005 keys = []
2008 2006 for path in self._cache.keys():
2009 2007 cache = self._cache[path]
2010 2008 try:
2011 2009 underlying = self._wrappedctx[path]
2012 2010 if (underlying.data() == cache['data'] and
2013 2011 underlying.flags() == cache['flags']):
2014 2012 keys.append(path)
2015 2013 except error.ManifestLookupError:
2016 2014 # Path not in the underlying manifest (created).
2017 2015 continue
2018 2016
2019 2017 for path in keys:
2020 2018 del self._cache[path]
2021 2019 return keys
2022 2020
2023 2021 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2024 2022 self._cache[path] = {
2025 2023 'exists': exists,
2026 2024 'data': data,
2027 2025 'date': date,
2028 2026 'flags': flags,
2029 2027 'copied': None,
2030 2028 }
2031 2029
2032 2030 def filectx(self, path, filelog=None):
2033 2031 return overlayworkingfilectx(self._repo, path, parent=self,
2034 2032 filelog=filelog)
2035 2033
2036 2034 class overlayworkingfilectx(committablefilectx):
2037 2035     """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2038 2036 cache, which can be flushed through later by calling ``flush()``."""
2039 2037
2040 2038 def __init__(self, repo, path, filelog=None, parent=None):
2041 2039 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2042 2040 parent)
2043 2041 self._repo = repo
2044 2042 self._parent = parent
2045 2043 self._path = path
2046 2044
2047 2045 def cmp(self, fctx):
2048 2046 return self.data() != fctx.data()
2049 2047
2050 2048 def changectx(self):
2051 2049 return self._parent
2052 2050
2053 2051 def data(self):
2054 2052 return self._parent.data(self._path)
2055 2053
2056 2054 def date(self):
2057 2055 return self._parent.filedate(self._path)
2058 2056
2059 2057 def exists(self):
2060 2058 return self.lexists()
2061 2059
2062 2060 def lexists(self):
2063 2061 return self._parent.exists(self._path)
2064 2062
2065 2063 def renamed(self):
2066 2064 path = self._parent.copydata(self._path)
2067 2065 if not path:
2068 2066 return None
2069 2067 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2070 2068
2071 2069 def size(self):
2072 2070 return self._parent.size(self._path)
2073 2071
2074 2072 def markcopied(self, origin):
2075 2073 self._parent.markcopied(self._path, origin)
2076 2074
2077 2075 def audit(self):
2078 2076 pass
2079 2077
2080 2078 def flags(self):
2081 2079 return self._parent.flags(self._path)
2082 2080
2083 2081 def setflags(self, islink, isexec):
2084 2082 return self._parent.setflags(self._path, islink, isexec)
2085 2083
2086 2084 def write(self, data, flags, backgroundclose=False, **kwargs):
2087 2085 return self._parent.write(self._path, data, flags, **kwargs)
2088 2086
2089 2087 def remove(self, ignoremissing=False):
2090 2088 return self._parent.remove(self._path)
2091 2089
2092 2090 def clearunknown(self):
2093 2091 pass
2094 2092
2095 2093 class workingcommitctx(workingctx):
2096 2094 """A workingcommitctx object makes access to data related to
2097 2095 the revision being committed convenient.
2098 2096
2099 2097 This hides changes in the working directory, if they aren't
2100 2098 committed in this context.
2101 2099 """
2102 2100 def __init__(self, repo, changes,
2103 2101 text="", user=None, date=None, extra=None):
2104 2102 super(workingctx, self).__init__(repo, text, user, date, extra,
2105 2103 changes)
2106 2104
2107 2105 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2108 2106 """Return matched files only in ``self._status``
2109 2107
2110 2108 Uncommitted files appear "clean" via this context, even if
2111 2109 they aren't actually so in the working directory.
2112 2110 """
2113 2111 if clean:
2114 2112 clean = [f for f in self._manifest if f not in self._changedset]
2115 2113 else:
2116 2114 clean = []
2117 2115 return scmutil.status([f for f in self._status.modified if match(f)],
2118 2116 [f for f in self._status.added if match(f)],
2119 2117 [f for f in self._status.removed if match(f)],
2120 2118 [], [], [], clean)
2121 2119
2122 2120 @propertycache
2123 2121 def _changedset(self):
2124 2122 """Return the set of files changed in this context
2125 2123 """
2126 2124 changed = set(self._status.modified)
2127 2125 changed.update(self._status.added)
2128 2126 changed.update(self._status.removed)
2129 2127 return changed
2130 2128
2131 2129 def makecachingfilectxfn(func):
2132 2130 """Create a filectxfn that caches based on the path.
2133 2131
2134 2132 We can't use util.cachefunc because it uses all arguments as the cache
2135 2133 key and this creates a cycle since the arguments include the repo and
2136 2134 memctx.
2137 2135 """
2138 2136 cache = {}
2139 2137
2140 2138 def getfilectx(repo, memctx, path):
2141 2139 if path not in cache:
2142 2140 cache[path] = func(repo, memctx, path)
2143 2141 return cache[path]
2144 2142
2145 2143 return getfilectx
2146 2144
2147 2145 def memfilefromctx(ctx):
2148 2146 """Given a context return a memfilectx for ctx[path]
2149 2147
2150 2148 This is a convenience method for building a memctx based on another
2151 2149 context.
2152 2150 """
2153 2151 def getfilectx(repo, memctx, path):
2154 2152 fctx = ctx[path]
2155 2153 # this is weird but apparently we only keep track of one parent
2156 2154 # (why not only store that instead of a tuple?)
2157 2155 copied = fctx.renamed()
2158 2156 if copied:
2159 2157 copied = copied[0]
2160 2158 return memfilectx(repo, memctx, path, fctx.data(),
2161 2159 islink=fctx.islink(), isexec=fctx.isexec(),
2162 2160 copied=copied)
2163 2161
2164 2162 return getfilectx
2165 2163
2166 2164 def memfilefrompatch(patchstore):
2167 2165 """Given a patch (e.g. patchstore object) return a memfilectx
2168 2166
2169 2167 This is a convenience method for building a memctx based on a patchstore.
2170 2168 """
2171 2169 def getfilectx(repo, memctx, path):
2172 2170 data, mode, copied = patchstore.getfile(path)
2173 2171 if data is None:
2174 2172 return None
2175 2173 islink, isexec = mode
2176 2174 return memfilectx(repo, memctx, path, data, islink=islink,
2177 2175 isexec=isexec, copied=copied)
2178 2176
2179 2177 return getfilectx
2180 2178
2181 2179 class memctx(committablectx):
2182 2180 """Use memctx to perform in-memory commits via localrepo.commitctx().
2183 2181
2184 2182 Revision information is supplied at initialization time while
2185 2183     related file data is made available through a callback
2186 2184 mechanism. 'repo' is the current localrepo, 'parents' is a
2187 2185 sequence of two parent revisions identifiers (pass None for every
2188 2186 missing parent), 'text' is the commit message and 'files' lists
2189 2187 names of files touched by the revision (normalized and relative to
2190 2188 repository root).
2191 2189
2192 2190 filectxfn(repo, memctx, path) is a callable receiving the
2193 2191 repository, the current memctx object and the normalized path of
2194 2192 requested file, relative to repository root. It is fired by the
2195 2193 commit function for every file in 'files', but calls order is
2196 2194 undefined. If the file is available in the revision being
2197 2195 committed (updated or added), filectxfn returns a memfilectx
2198 2196 object. If the file was removed, filectxfn return None for recent
2199 2197     object. If the file was removed, filectxfn returns None for recent
2200 2198 removed and the new file added with copy information (see
2201 2199 memfilectx).
2202 2200
2203 2201 user receives the committer name and defaults to current
2204 2202 repository username, date is the commit date in any format
2205 2203 supported by dateutil.parsedate() and defaults to current date, extra
2206 2204 is a dictionary of metadata or is left empty.
2207 2205 """
2208 2206
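    # A minimal usage sketch (hypothetical values, not executed): create a
    # one-file commit entirely in memory, assuming ``repo`` is a localrepo
    # and ``p1node`` is the desired parent node:
    #
    #   def filectxfn(repo, memctx, path):
    #       return memfilectx(repo, memctx, path, b'new contents\n')
    #
    #   ctx = memctx(repo, (p1node, None), b'commit message',
    #                [b'some/file.txt'], filectxfn, user=b'alice')
    #   newnode = ctx.commit()
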
2209 2207 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2210 2208 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2211 2209 # this field to determine what to do in filectxfn.
2212 2210 _returnnoneformissingfiles = True
2213 2211
2214 2212 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2215 2213 date=None, extra=None, branch=None, editor=False):
2216 2214 super(memctx, self).__init__(repo, text, user, date, extra)
2217 2215 self._rev = None
2218 2216 self._node = None
2219 2217 parents = [(p or nullid) for p in parents]
2220 2218 p1, p2 = parents
2221 2219 self._parents = [self._repo[p] for p in (p1, p2)]
2222 2220 files = sorted(set(files))
2223 2221 self._files = files
2224 2222 if branch is not None:
2225 2223 self._extra['branch'] = encoding.fromlocal(branch)
2226 2224 self.substate = {}
2227 2225
2228 2226 if isinstance(filectxfn, patch.filestore):
2229 2227 filectxfn = memfilefrompatch(filectxfn)
2230 2228 elif not callable(filectxfn):
2231 2229 # if store is not callable, wrap it in a function
2232 2230 filectxfn = memfilefromctx(filectxfn)
2233 2231
2234 2232 # memoizing increases performance for e.g. vcs convert scenarios.
2235 2233 self._filectxfn = makecachingfilectxfn(filectxfn)
2236 2234
2237 2235 if editor:
2238 2236 self._text = editor(self._repo, self, [])
2239 2237 self._repo.savecommitmessage(self._text)
2240 2238
2241 2239 def filectx(self, path, filelog=None):
2242 2240 """get a file context from the working directory
2243 2241
2244 2242 Returns None if file doesn't exist and should be removed."""
2245 2243 return self._filectxfn(self._repo, self, path)
2246 2244
2247 2245 def commit(self):
2248 2246 """commit context to the repo"""
2249 2247 return self._repo.commitctx(self)
2250 2248
2251 2249 @propertycache
2252 2250 def _manifest(self):
2253 2251 """generate a manifest based on the return values of filectxfn"""
2254 2252
2255 2253 # keep this simple for now; just worry about p1
2256 2254 pctx = self._parents[0]
2257 2255 man = pctx.manifest().copy()
2258 2256
2259 2257 for f in self._status.modified:
2260 2258 p1node = nullid
2261 2259 p2node = nullid
2262 2260 p = pctx[f].parents() # if file isn't in pctx, check p2?
2263 2261 if len(p) > 0:
2264 2262 p1node = p[0].filenode()
2265 2263 if len(p) > 1:
2266 2264 p2node = p[1].filenode()
2267 2265 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2268 2266
2269 2267 for f in self._status.added:
2270 2268 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2271 2269
2272 2270 for f in self._status.removed:
2273 2271 if f in man:
2274 2272 del man[f]
2275 2273
2276 2274 return man
2277 2275
2278 2276 @propertycache
2279 2277 def _status(self):
2280 2278 """Calculate exact status from ``files`` specified at construction
2281 2279 """
2282 2280 man1 = self.p1().manifest()
2283 2281 p2 = self._parents[1]
2284 2282 # "1 < len(self._parents)" can't be used for checking
2285 2283 # existence of the 2nd parent, because "memctx._parents" is
2286 2284         # explicitly initialized with a list whose length is always 2.
2287 2285 if p2.node() != nullid:
2288 2286 man2 = p2.manifest()
2289 2287 managing = lambda f: f in man1 or f in man2
2290 2288 else:
2291 2289 managing = lambda f: f in man1
2292 2290
2293 2291 modified, added, removed = [], [], []
2294 2292 for f in self._files:
2295 2293 if not managing(f):
2296 2294 added.append(f)
2297 2295 elif self[f]:
2298 2296 modified.append(f)
2299 2297 else:
2300 2298 removed.append(f)
2301 2299
2302 2300 return scmutil.status(modified, added, removed, [], [], [], [])
2303 2301
2304 2302 class memfilectx(committablefilectx):
2305 2303 """memfilectx represents an in-memory file to commit.
2306 2304
2307 2305 See memctx and committablefilectx for more details.
2308 2306 """
2309 2307 def __init__(self, repo, changectx, path, data, islink=False,
2310 2308 isexec=False, copied=None):
2311 2309 """
2312 2310 path is the normalized file path relative to repository root.
2313 2311 data is the file content as a string.
2314 2312 islink is True if the file is a symbolic link.
2315 2313 isexec is True if the file is executable.
2316 2314 copied is the source file path if current file was copied in the
2317 2315 revision being committed, or None."""
2318 2316 super(memfilectx, self).__init__(repo, path, None, changectx)
2319 2317 self._data = data
2320 2318 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2321 2319 self._copied = None
2322 2320 if copied:
2323 2321 self._copied = (copied, nullid)
2324 2322
2325 2323 def data(self):
2326 2324 return self._data
2327 2325
2328 2326 def remove(self, ignoremissing=False):
2329 2327 """wraps unlink for a repo's working directory"""
2330 2328 # need to figure out what to do here
2331 2329 del self._changectx[self._path]
2332 2330
2333 2331 def write(self, data, flags, **kwargs):
2334 2332 """wraps repo.wwrite"""
2335 2333 self._data = data
2336 2334
2337 2335 class overlayfilectx(committablefilectx):
2338 2336     """Like memfilectx but takes an original filectx and optional parameters to
2339 2337 override parts of it. This is useful when fctx.data() is expensive (i.e.
2340 2338 flag processor is expensive) and raw data, flags, and filenode could be
2341 2339 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2342 2340 """
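    # Illustrative sketch (hypothetical names, not executed): wrap an existing
    # filectx and override only its flags; since datafunc, ctx and copied are
    # left alone, the raw data and filenode remain reusable:
    #   execfctx = overlayfilectx(origfctx, flags='x')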
2343 2341
2344 2342 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2345 2343 copied=None, ctx=None):
2346 2344 """originalfctx: filecontext to duplicate
2347 2345
2348 2346 datafunc: None or a function to override data (file content). It is a
2349 2347 function to be lazy. path, flags, copied, ctx: None or overridden value
2350 2348
2351 2349 copied could be (path, rev), or False. copied could also be just path,
2352 2350 and will be converted to (path, nullid). This simplifies some callers.
2353 2351 """
2354 2352
2355 2353 if path is None:
2356 2354 path = originalfctx.path()
2357 2355 if ctx is None:
2358 2356 ctx = originalfctx.changectx()
2359 2357 ctxmatch = lambda: True
2360 2358 else:
2361 2359 ctxmatch = lambda: ctx == originalfctx.changectx()
2362 2360
2363 2361 repo = originalfctx.repo()
2364 2362 flog = originalfctx.filelog()
2365 2363 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2366 2364
2367 2365 if copied is None:
2368 2366 copied = originalfctx.renamed()
2369 2367 copiedmatch = lambda: True
2370 2368 else:
2371 2369 if copied and not isinstance(copied, tuple):
2372 2370 # repo._filecommit will recalculate copyrev so nullid is okay
2373 2371 copied = (copied, nullid)
2374 2372 copiedmatch = lambda: copied == originalfctx.renamed()
2375 2373
2376 2374 # When data, copied (could affect data), ctx (could affect filelog
2377 2375 # parents) are not overridden, rawdata, rawflags, and filenode may be
2378 2376 # reused (repo._filecommit should double check filelog parents).
2379 2377 #
2380 2378 # path, flags are not hashed in filelog (but in manifestlog) so they do
2381 2379 # not affect reusable here.
2382 2380         # not affect reusability here.
2383 2381         # If ctx or copied is overridden to the same value as originalfctx,
2384 2382         # still consider it reusable. originalfctx.renamed() may be a bit
2385 2383 # expensive so it's not called unless necessary. Assuming datafunc is
2386 2384 # always expensive, do not call it for this "reusable" test.
2387 2385 reusable = datafunc is None and ctxmatch() and copiedmatch()
2388 2386
2389 2387 if datafunc is None:
2390 2388 datafunc = originalfctx.data
2391 2389 if flags is None:
2392 2390 flags = originalfctx.flags()
2393 2391
2394 2392 self._datafunc = datafunc
2395 2393 self._flags = flags
2396 2394 self._copied = copied
2397 2395
2398 2396 if reusable:
2399 2397 # copy extra fields from originalfctx
2400 2398 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2401 2399 for attr_ in attrs:
2402 2400 if util.safehasattr(originalfctx, attr_):
2403 2401 setattr(self, attr_, getattr(originalfctx, attr_))
2404 2402
2405 2403 def data(self):
2406 2404 return self._datafunc()
2407 2405
2408 2406 class metadataonlyctx(committablectx):
2409 2407     """Like memctx but it reuses the manifest of a different commit.
2410 2408 Intended to be used by lightweight operations that are creating
2411 2409 metadata-only changes.
2412 2410
2413 2411 Revision information is supplied at initialization time. 'repo' is the
2414 2412     current localrepo, 'originalctx' is the revision whose manifest we reuse,
2415 2413     'parents' is a sequence of two parent revision identifiers (pass None for
2416 2414     every missing parent), 'text' is the commit message.
2417 2415
2418 2416 user receives the committer name and defaults to current repository
2419 2417 username, date is the commit date in any format supported by
2420 2418 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2421 2419 metadata or is left empty.
2422 2420 """
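    # A minimal usage sketch (hypothetical values, not executed): reword the
    # current changeset's description while reusing its manifest unchanged:
    #   ctx = repo['.']
    #   newctx = metadataonlyctx(repo, ctx, text=b'reworded message',
    #                            user=ctx.user(), date=ctx.date())
    #   newnode = newctx.commit()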
2423 2421 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2424 2422 date=None, extra=None, editor=False):
2425 2423 if text is None:
2426 2424 text = originalctx.description()
2427 2425 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2428 2426 self._rev = None
2429 2427 self._node = None
2430 2428 self._originalctx = originalctx
2431 2429 self._manifestnode = originalctx.manifestnode()
2432 2430 if parents is None:
2433 2431 parents = originalctx.parents()
2434 2432 else:
2435 2433 parents = [repo[p] for p in parents if p is not None]
2436 2434 parents = parents[:]
2437 2435 while len(parents) < 2:
2438 2436 parents.append(repo[nullid])
2439 2437 p1, p2 = self._parents = parents
2440 2438
2441 2439 # sanity check to ensure that the reused manifest parents are
2442 2440 # manifests of our commit parents
2443 2441 mp1, mp2 = self.manifestctx().parents
2444 2442 if p1 != nullid and p1.manifestnode() != mp1:
2445 2443 raise RuntimeError('can\'t reuse the manifest: '
2446 2444 'its p1 doesn\'t match the new ctx p1')
2447 2445 if p2 != nullid and p2.manifestnode() != mp2:
2448 2446 raise RuntimeError('can\'t reuse the manifest: '
2449 2447 'its p2 doesn\'t match the new ctx p2')
2450 2448
2451 2449 self._files = originalctx.files()
2452 2450 self.substate = {}
2453 2451
2454 2452 if editor:
2455 2453 self._text = editor(self._repo, self, [])
2456 2454 self._repo.savecommitmessage(self._text)
2457 2455
2458 2456 def manifestnode(self):
2459 2457 return self._manifestnode
2460 2458
2461 2459 @property
2462 2460 def _manifestctx(self):
2463 2461 return self._repo.manifestlog[self._manifestnode]
2464 2462
2465 2463 def filectx(self, path, filelog=None):
2466 2464 return self._originalctx.filectx(path, filelog=filelog)
2467 2465
2468 2466 def commit(self):
2469 2467 """commit context to the repo"""
2470 2468 return self._repo.commitctx(self)
2471 2469
2472 2470 @property
2473 2471 def _manifest(self):
2474 2472 return self._originalctx.manifest()
2475 2473
2476 2474 @propertycache
2477 2475 def _status(self):
2478 2476 """Calculate exact status from ``files`` specified in the ``origctx``
2479 2477         and the parents' manifests.
2480 2478 """
2481 2479 man1 = self.p1().manifest()
2482 2480 p2 = self._parents[1]
2483 2481 # "1 < len(self._parents)" can't be used for checking
2484 2482 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2485 2483         # explicitly initialized with a list whose length is always 2.
2486 2484 if p2.node() != nullid:
2487 2485 man2 = p2.manifest()
2488 2486 managing = lambda f: f in man1 or f in man2
2489 2487 else:
2490 2488 managing = lambda f: f in man1
2491 2489
2492 2490 modified, added, removed = [], [], []
2493 2491 for f in self._files:
2494 2492 if not managing(f):
2495 2493 added.append(f)
2496 2494 elif f in self:
2497 2495 modified.append(f)
2498 2496 else:
2499 2497 removed.append(f)
2500 2498
2501 2499 return scmutil.status(modified, added, removed, [], [], [], [])
2502 2500
2503 2501 class arbitraryfilectx(object):
2504 2502 """Allows you to use filectx-like functions on a file in an arbitrary
2505 2503 location on disk, possibly not in the working directory.
2506 2504 """
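    # Illustrative sketch (hypothetical path, not executed): wrap a plain file
    # on disk so filectx-style code such as contrib/simplemerge can use it:
    #   fctx = arbitraryfilectx('/tmp/some-file.txt')
    #   data = fctx.data()    # util.readfile() of the wrapped path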
2507 2505 def __init__(self, path, repo=None):
2508 2506 # Repo is optional because contrib/simplemerge uses this class.
2509 2507 self._repo = repo
2510 2508 self._path = path
2511 2509
2512 2510 def cmp(self, fctx):
2513 2511 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2514 2512 # path if either side is a symlink.
2515 2513 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2516 2514 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2517 2515 # Add a fast-path for merge if both sides are disk-backed.
2518 2516 # Note that filecmp uses the opposite return values (True if same)
2519 2517 # from our cmp functions (True if different).
2520 2518 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2521 2519 return self.data() != fctx.data()
2522 2520
2523 2521 def path(self):
2524 2522 return self._path
2525 2523
2526 2524 def flags(self):
2527 2525 return ''
2528 2526
2529 2527 def data(self):
2530 2528 return util.readfile(self._path)
2531 2529
2532 2530 def decodeddata(self):
2533 2531 with open(self._path, "rb") as f:
2534 2532 return f.read()
2535 2533
2536 2534 def remove(self):
2537 2535 util.unlink(self._path)
2538 2536
2539 2537 def write(self, data, flags, **kwargs):
2540 2538 assert not flags
2541 2539 with open(self._path, "w") as f:
2542 2540 f.write(data)