context: no longer accept diff options as dictionnary...
Boris Feld
r38588:62249cfe default
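This changeset removes the dict-to-diffopts conversion from `basectx.diff()`: the `mdiff` import and the `isinstance(opts, mdiff.diffopts)` fallback go away, so `opts` is now handed straight to `patch.diff()`. Callers that used to pass a plain dict of diff options must build an `mdiff.diffopts` object themselves. A minimal sketch of how a caller might adapt (hypothetical caller code, not part of this changeset; assumes a `repo` and a context `ctx` are already in hand):

    from mercurial import patch

    # Previously ctx.diff() would accept a plain dict and convert it internally:
    #     ctx.diff(opts={'git': True})
    # Now the caller builds the mdiff.diffopts instance up front, e.g. via
    # patch.diffopts(), and passes that in:
    diffopts = patch.diffopts(repo.ui, {'git': True})
    for chunk in ctx.diff(opts=diffopts):
        repo.ui.write(chunk)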
@@ -1,2546 +1,2542 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirfilenodeids,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 mdiff,
34 33 obsolete as obsmod,
35 34 patch,
36 35 pathutil,
37 36 phases,
38 37 pycompat,
39 38 repoview,
40 39 revlog,
41 40 scmutil,
42 41 sparse,
43 42 subrepo,
44 43 subrepoutil,
45 44 util,
46 45 )
47 46 from .utils import (
48 47 dateutil,
49 48 stringutil,
50 49 )
51 50
52 51 propertycache = util.propertycache
53 52
54 53 class basectx(object):
55 54 """A basectx object represents the common logic for its children:
56 55 changectx: read-only context that is already present in the repo,
57 56 workingctx: a context that represents the working directory and can
58 57 be committed,
59 58 memctx: a context that represents changes in-memory and can also
60 59 be committed."""
61 60
62 61 def __init__(self, repo):
63 62 self._repo = repo
64 63
65 64 def __bytes__(self):
66 65 return short(self.node())
67 66
68 67 __str__ = encoding.strmethod(__bytes__)
69 68
70 69 def __repr__(self):
71 70 return r"<%s %s>" % (type(self).__name__, str(self))
72 71
73 72 def __eq__(self, other):
74 73 try:
75 74 return type(self) == type(other) and self._rev == other._rev
76 75 except AttributeError:
77 76 return False
78 77
79 78 def __ne__(self, other):
80 79 return not (self == other)
81 80
82 81 def __contains__(self, key):
83 82 return key in self._manifest
84 83
85 84 def __getitem__(self, key):
86 85 return self.filectx(key)
87 86
88 87 def __iter__(self):
89 88 return iter(self._manifest)
90 89
91 90 def _buildstatusmanifest(self, status):
92 91 """Builds a manifest that includes the given status results, if this is
93 92 a working copy context. For non-working copy contexts, it just returns
94 93 the normal manifest."""
95 94 return self.manifest()
96 95
97 96 def _matchstatus(self, other, match):
98 97 """This internal method provides a way for child objects to override the
99 98 match operator.
100 99 """
101 100 return match
102 101
103 102 def _buildstatus(self, other, s, match, listignored, listclean,
104 103 listunknown):
105 104 """build a status with respect to another context"""
106 105 # Load earliest manifest first for caching reasons. More specifically,
107 106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
108 107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
109 108 # 1000 and cache it so that when you read 1001, we just need to apply a
110 109 # delta to what's in the cache. So that's one full reconstruction + one
111 110 # delta application.
112 111 mf2 = None
113 112 if self.rev() is not None and self.rev() < other.rev():
114 113 mf2 = self._buildstatusmanifest(s)
115 114 mf1 = other._buildstatusmanifest(s)
116 115 if mf2 is None:
117 116 mf2 = self._buildstatusmanifest(s)
118 117
119 118 modified, added = [], []
120 119 removed = []
121 120 clean = []
122 121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
123 122 deletedset = set(deleted)
124 123 d = mf1.diff(mf2, match=match, clean=listclean)
125 124 for fn, value in d.iteritems():
126 125 if fn in deletedset:
127 126 continue
128 127 if value is None:
129 128 clean.append(fn)
130 129 continue
131 130 (node1, flag1), (node2, flag2) = value
132 131 if node1 is None:
133 132 added.append(fn)
134 133 elif node2 is None:
135 134 removed.append(fn)
136 135 elif flag1 != flag2:
137 136 modified.append(fn)
138 137 elif node2 not in wdirfilenodeids:
139 138 # When comparing files between two commits, we save time by
140 139 # not comparing the file contents when the nodeids differ.
141 140 # Note that this means we incorrectly report a reverted change
142 141 # to a file as a modification.
143 142 modified.append(fn)
144 143 elif self[fn].cmp(other[fn]):
145 144 modified.append(fn)
146 145 else:
147 146 clean.append(fn)
148 147
149 148 if removed:
150 149 # need to filter files if they are already reported as removed
151 150 unknown = [fn for fn in unknown if fn not in mf1 and
152 151 (not match or match(fn))]
153 152 ignored = [fn for fn in ignored if fn not in mf1 and
154 153 (not match or match(fn))]
155 154 # if they're deleted, don't report them as removed
156 155 removed = [fn for fn in removed if fn not in deletedset]
157 156
158 157 return scmutil.status(modified, added, removed, deleted, unknown,
159 158 ignored, clean)
160 159
161 160 @propertycache
162 161 def substate(self):
163 162 return subrepoutil.state(self, self._repo.ui)
164 163
165 164 def subrev(self, subpath):
166 165 return self.substate[subpath][1]
167 166
168 167 def rev(self):
169 168 return self._rev
170 169 def node(self):
171 170 return self._node
172 171 def hex(self):
173 172 return hex(self.node())
174 173 def manifest(self):
175 174 return self._manifest
176 175 def manifestctx(self):
177 176 return self._manifestctx
178 177 def repo(self):
179 178 return self._repo
180 179 def phasestr(self):
181 180 return phases.phasenames[self.phase()]
182 181 def mutable(self):
183 182 return self.phase() > phases.public
184 183
185 184 def getfileset(self, expr):
186 185 return fileset.getfileset(self, expr)
187 186
188 187 def obsolete(self):
189 188 """True if the changeset is obsolete"""
190 189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
191 190
192 191 def extinct(self):
193 192 """True if the changeset is extinct"""
194 193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
195 194
196 195 def orphan(self):
197 196 """True if the changeset is not obsolete but it's ancestor are"""
198 197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
199 198
200 199 def phasedivergent(self):
201 200 """True if the changeset try to be a successor of a public changeset
202 201
203 202 Only non-public and non-obsolete changesets may be bumped.
204 203 """
205 204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
206 205
207 206 def contentdivergent(self):
208 207 """Is a successors of a changeset with multiple possible successors set
209 208
210 209 Only non-public and non-obsolete changesets may be divergent.
211 210 """
212 211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
213 212
214 213 def isunstable(self):
215 214 """True if the changeset is either unstable, bumped or divergent"""
216 215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
217 216
218 217 def instabilities(self):
219 218 """return the list of instabilities affecting this changeset.
220 219
221 220 Instabilities are returned as strings. possible values are:
222 221 - orphan,
223 222 - phase-divergent,
224 223 - content-divergent.
225 224 """
226 225 instabilities = []
227 226 if self.orphan():
228 227 instabilities.append('orphan')
229 228 if self.phasedivergent():
230 229 instabilities.append('phase-divergent')
231 230 if self.contentdivergent():
232 231 instabilities.append('content-divergent')
233 232 return instabilities
234 233
235 234 def parents(self):
236 235 """return contexts for each parent changeset"""
237 236 return self._parents
238 237
239 238 def p1(self):
240 239 return self._parents[0]
241 240
242 241 def p2(self):
243 242 parents = self._parents
244 243 if len(parents) == 2:
245 244 return parents[1]
246 245 return changectx(self._repo, nullrev)
247 246
248 247 def _fileinfo(self, path):
249 248 if r'_manifest' in self.__dict__:
250 249 try:
251 250 return self._manifest[path], self._manifest.flags(path)
252 251 except KeyError:
253 252 raise error.ManifestLookupError(self._node, path,
254 253 _('not found in manifest'))
255 254 if r'_manifestdelta' in self.__dict__ or path in self.files():
256 255 if path in self._manifestdelta:
257 256 return (self._manifestdelta[path],
258 257 self._manifestdelta.flags(path))
259 258 mfl = self._repo.manifestlog
260 259 try:
261 260 node, flag = mfl[self._changeset.manifest].find(path)
262 261 except KeyError:
263 262 raise error.ManifestLookupError(self._node, path,
264 263 _('not found in manifest'))
265 264
266 265 return node, flag
267 266
268 267 def filenode(self, path):
269 268 return self._fileinfo(path)[0]
270 269
271 270 def flags(self, path):
272 271 try:
273 272 return self._fileinfo(path)[1]
274 273 except error.LookupError:
275 274 return ''
276 275
277 276 def sub(self, path, allowcreate=True):
278 277 '''return a subrepo for the stored revision of path, never wdir()'''
279 278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 279
281 280 def nullsub(self, path, pctx):
282 281 return subrepo.nullsubrepo(self, path, pctx)
283 282
284 283 def workingsub(self, path):
285 284 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 285 context.
287 286 '''
288 287 return subrepo.subrepo(self, path, allowwdir=True)
289 288
290 289 def match(self, pats=None, include=None, exclude=None, default='glob',
291 290 listsubrepos=False, badfn=None):
292 291 r = self._repo
293 292 return matchmod.match(r.root, r.getcwd(), pats,
294 293 include, exclude, default,
295 294 auditor=r.nofsauditor, ctx=self,
296 295 listsubrepos=listsubrepos, badfn=badfn)
297 296
298 297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
299 298 losedatafn=None, prefix='', relroot='', copy=None,
300 299 hunksfilterfn=None):
301 300 """Returns a diff generator for the given contexts and matcher"""
302 301 if ctx2 is None:
303 302 ctx2 = self.p1()
304 303 if ctx2 is not None:
305 304 ctx2 = self._repo[ctx2]
306 305
307 if isinstance(opts, mdiff.diffopts):
308 306 diffopts = opts
309 else:
310 diffopts = patch.diffopts(self._repo.ui, opts)
311 307 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
312 308 opts=diffopts, losedatafn=losedatafn, prefix=prefix,
313 309 relroot=relroot, copy=copy,
314 310 hunksfilterfn=hunksfilterfn)
315 311
316 312 def dirs(self):
317 313 return self._manifest.dirs()
318 314
319 315 def hasdir(self, dir):
320 316 return self._manifest.hasdir(dir)
321 317
322 318 def status(self, other=None, match=None, listignored=False,
323 319 listclean=False, listunknown=False, listsubrepos=False):
324 320 """return status of files between two nodes or node and working
325 321 directory.
326 322
327 323 If other is None, compare this node with working directory.
328 324
329 325 returns (modified, added, removed, deleted, unknown, ignored, clean)
330 326 """
331 327
332 328 ctx1 = self
333 329 ctx2 = self._repo[other]
334 330
335 331 # This next code block is, admittedly, fragile logic that tests for
336 332 # reversing the contexts and wouldn't need to exist if it weren't for
337 333 # the fast (and common) code path of comparing the working directory
338 334 # with its first parent.
339 335 #
340 336 # What we're aiming for here is the ability to call:
341 337 #
342 338 # workingctx.status(parentctx)
343 339 #
344 340 # If we always built the manifest for each context and compared those,
345 341 # then we'd be done. But the special case of the above call means we
346 342 # just copy the manifest of the parent.
347 343 reversed = False
348 344 if (not isinstance(ctx1, changectx)
349 345 and isinstance(ctx2, changectx)):
350 346 reversed = True
351 347 ctx1, ctx2 = ctx2, ctx1
352 348
353 349 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
354 350 match = ctx2._matchstatus(ctx1, match)
355 351 r = scmutil.status([], [], [], [], [], [], [])
356 352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
357 353 listunknown)
358 354
359 355 if reversed:
360 356 # Reverse added and removed. Clear deleted, unknown and ignored as
361 357 # these make no sense to reverse.
362 358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
363 359 r.clean)
364 360
365 361 if listsubrepos:
366 362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
367 363 try:
368 364 rev2 = ctx2.subrev(subpath)
369 365 except KeyError:
370 366 # A subrepo that existed in node1 was deleted between
371 367 # node1 and node2 (inclusive). Thus, ctx2's substate
372 368 # won't contain that subpath. The best we can do is ignore it.
373 369 rev2 = None
374 370 submatch = matchmod.subdirmatcher(subpath, match)
375 371 s = sub.status(rev2, match=submatch, ignored=listignored,
376 372 clean=listclean, unknown=listunknown,
377 373 listsubrepos=True)
378 374 for rfiles, sfiles in zip(r, s):
379 375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
380 376
381 377 for l in r:
382 378 l.sort()
383 379
384 380 return r
385 381
386 382 class changectx(basectx):
387 383 """A changecontext object makes access to data related to a particular
388 384 changeset convenient. It represents a read-only context already present in
389 385 the repo."""
390 386 def __init__(self, repo, changeid='.'):
391 387 """changeid is a revision number, node, or tag"""
392 388 super(changectx, self).__init__(repo)
393 389
394 390 try:
395 391 if isinstance(changeid, int):
396 392 self._node = repo.changelog.node(changeid)
397 393 self._rev = changeid
398 394 return
399 395 elif changeid == 'null':
400 396 self._node = nullid
401 397 self._rev = nullrev
402 398 return
403 399 elif changeid == 'tip':
404 400 self._node = repo.changelog.tip()
405 401 self._rev = repo.changelog.rev(self._node)
406 402 return
407 403 elif (changeid == '.'
408 404 or repo.local() and changeid == repo.dirstate.p1()):
409 405 # this is a hack to delay/avoid loading obsmarkers
410 406 # when we know that '.' won't be hidden
411 407 self._node = repo.dirstate.p1()
412 408 self._rev = repo.unfiltered().changelog.rev(self._node)
413 409 return
414 410 elif len(changeid) == 20:
415 411 try:
416 412 self._node = changeid
417 413 self._rev = repo.changelog.rev(changeid)
418 414 return
419 415 except error.FilteredLookupError:
420 416 raise
421 417 except LookupError:
422 418 # check if it might have come from damaged dirstate
423 419 #
424 420 # XXX we could avoid the unfiltered if we had a recognizable
425 421 # exception for filtered changeset access
426 422 if (repo.local()
427 423 and changeid in repo.unfiltered().dirstate.parents()):
428 424 msg = _("working directory has unknown parent '%s'!")
429 425 raise error.Abort(msg % short(changeid))
430 426 changeid = hex(changeid) # for the error message
431 427
432 428 elif len(changeid) == 40:
433 429 try:
434 430 self._node = bin(changeid)
435 431 self._rev = repo.changelog.rev(self._node)
436 432 return
437 433 except error.FilteredLookupError:
438 434 raise
439 435 except (TypeError, LookupError):
440 436 pass
441 437
442 438 # lookup failed
443 439 except (error.FilteredIndexError, error.FilteredLookupError):
444 440 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
445 441 % pycompat.bytestr(changeid))
446 442 except error.FilteredRepoLookupError:
447 443 raise
448 444 except IndexError:
449 445 pass
450 446 raise error.RepoLookupError(
451 447 _("unknown revision '%s'") % changeid)
452 448
453 449 def __hash__(self):
454 450 try:
455 451 return hash(self._rev)
456 452 except AttributeError:
457 453 return id(self)
458 454
459 455 def __nonzero__(self):
460 456 return self._rev != nullrev
461 457
462 458 __bool__ = __nonzero__
463 459
464 460 @propertycache
465 461 def _changeset(self):
466 462 return self._repo.changelog.changelogrevision(self.rev())
467 463
468 464 @propertycache
469 465 def _manifest(self):
470 466 return self._manifestctx.read()
471 467
472 468 @property
473 469 def _manifestctx(self):
474 470 return self._repo.manifestlog[self._changeset.manifest]
475 471
476 472 @propertycache
477 473 def _manifestdelta(self):
478 474 return self._manifestctx.readdelta()
479 475
480 476 @propertycache
481 477 def _parents(self):
482 478 repo = self._repo
483 479 p1, p2 = repo.changelog.parentrevs(self._rev)
484 480 if p2 == nullrev:
485 481 return [changectx(repo, p1)]
486 482 return [changectx(repo, p1), changectx(repo, p2)]
487 483
488 484 def changeset(self):
489 485 c = self._changeset
490 486 return (
491 487 c.manifest,
492 488 c.user,
493 489 c.date,
494 490 c.files,
495 491 c.description,
496 492 c.extra,
497 493 )
498 494 def manifestnode(self):
499 495 return self._changeset.manifest
500 496
501 497 def user(self):
502 498 return self._changeset.user
503 499 def date(self):
504 500 return self._changeset.date
505 501 def files(self):
506 502 return self._changeset.files
507 503 def description(self):
508 504 return self._changeset.description
509 505 def branch(self):
510 506 return encoding.tolocal(self._changeset.extra.get("branch"))
511 507 def closesbranch(self):
512 508 return 'close' in self._changeset.extra
513 509 def extra(self):
514 510 """Return a dict of extra information."""
515 511 return self._changeset.extra
516 512 def tags(self):
517 513 """Return a list of byte tag names"""
518 514 return self._repo.nodetags(self._node)
519 515 def bookmarks(self):
520 516 """Return a list of byte bookmark names."""
521 517 return self._repo.nodebookmarks(self._node)
522 518 def phase(self):
523 519 return self._repo._phasecache.phase(self._repo, self._rev)
524 520 def hidden(self):
525 521 return self._rev in repoview.filterrevs(self._repo, 'visible')
526 522
527 523 def isinmemory(self):
528 524 return False
529 525
530 526 def children(self):
531 527 """return list of changectx contexts for each child changeset.
532 528
533 529 This returns only the immediate child changesets. Use descendants() to
534 530 recursively walk children.
535 531 """
536 532 c = self._repo.changelog.children(self._node)
537 533 return [changectx(self._repo, x) for x in c]
538 534
539 535 def ancestors(self):
540 536 for a in self._repo.changelog.ancestors([self._rev]):
541 537 yield changectx(self._repo, a)
542 538
543 539 def descendants(self):
544 540 """Recursively yield all children of the changeset.
545 541
546 542 For just the immediate children, use children()
547 543 """
548 544 for d in self._repo.changelog.descendants([self._rev]):
549 545 yield changectx(self._repo, d)
550 546
551 547 def filectx(self, path, fileid=None, filelog=None):
552 548 """get a file context from this changeset"""
553 549 if fileid is None:
554 550 fileid = self.filenode(path)
555 551 return filectx(self._repo, path, fileid=fileid,
556 552 changectx=self, filelog=filelog)
557 553
558 554 def ancestor(self, c2, warn=False):
559 555 """return the "best" ancestor context of self and c2
560 556
561 557 If there are multiple candidates, it will show a message and check
562 558 merge.preferancestor configuration before falling back to the
563 559 revlog ancestor."""
564 560 # deal with workingctxs
565 561 n2 = c2._node
566 562 if n2 is None:
567 563 n2 = c2._parents[0]._node
568 564 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
569 565 if not cahs:
570 566 anc = nullid
571 567 elif len(cahs) == 1:
572 568 anc = cahs[0]
573 569 else:
574 570 # experimental config: merge.preferancestor
575 571 for r in self._repo.ui.configlist('merge', 'preferancestor'):
576 572 try:
577 573 ctx = scmutil.revsymbol(self._repo, r)
578 574 except error.RepoLookupError:
579 575 continue
580 576 anc = ctx.node()
581 577 if anc in cahs:
582 578 break
583 579 else:
584 580 anc = self._repo.changelog.ancestor(self._node, n2)
585 581 if warn:
586 582 self._repo.ui.status(
587 583 (_("note: using %s as ancestor of %s and %s\n") %
588 584 (short(anc), short(self._node), short(n2))) +
589 585 ''.join(_(" alternatively, use --config "
590 586 "merge.preferancestor=%s\n") %
591 587 short(n) for n in sorted(cahs) if n != anc))
592 588 return changectx(self._repo, anc)
593 589
594 590 def descendant(self, other):
595 591 """True if other is descendant of this changeset"""
596 592 return self._repo.changelog.descendant(self._rev, other._rev)
597 593
598 594 def walk(self, match):
599 595 '''Generates matching file names.'''
600 596
601 597 # Wrap match.bad method to have message with nodeid
602 598 def bad(fn, msg):
603 599 # The manifest doesn't know about subrepos, so don't complain about
604 600 # paths into valid subrepos.
605 601 if any(fn == s or fn.startswith(s + '/')
606 602 for s in self.substate):
607 603 return
608 604 match.bad(fn, _('no such file in rev %s') % self)
609 605
610 606 m = matchmod.badmatch(match, bad)
611 607 return self._manifest.walk(m)
612 608
613 609 def matches(self, match):
614 610 return self.walk(match)
615 611
616 612 class basefilectx(object):
617 613 """A filecontext object represents the common logic for its children:
618 614 filectx: read-only access to a filerevision that is already present
619 615 in the repo,
620 616 workingfilectx: a filecontext that represents files from the working
621 617 directory,
622 618 memfilectx: a filecontext that represents files in-memory,
623 619 overlayfilectx: duplicate another filecontext with some fields overridden.
624 620 """
625 621 @propertycache
626 622 def _filelog(self):
627 623 return self._repo.file(self._path)
628 624
629 625 @propertycache
630 626 def _changeid(self):
631 627 if r'_changeid' in self.__dict__:
632 628 return self._changeid
633 629 elif r'_changectx' in self.__dict__:
634 630 return self._changectx.rev()
635 631 elif r'_descendantrev' in self.__dict__:
636 632 # this file context was created from a revision with a known
637 633 # descendant, we can (lazily) correct for linkrev aliases
638 634 return self._adjustlinkrev(self._descendantrev)
639 635 else:
640 636 return self._filelog.linkrev(self._filerev)
641 637
642 638 @propertycache
643 639 def _filenode(self):
644 640 if r'_fileid' in self.__dict__:
645 641 return self._filelog.lookup(self._fileid)
646 642 else:
647 643 return self._changectx.filenode(self._path)
648 644
649 645 @propertycache
650 646 def _filerev(self):
651 647 return self._filelog.rev(self._filenode)
652 648
653 649 @propertycache
654 650 def _repopath(self):
655 651 return self._path
656 652
657 653 def __nonzero__(self):
658 654 try:
659 655 self._filenode
660 656 return True
661 657 except error.LookupError:
662 658 # file is missing
663 659 return False
664 660
665 661 __bool__ = __nonzero__
666 662
667 663 def __bytes__(self):
668 664 try:
669 665 return "%s@%s" % (self.path(), self._changectx)
670 666 except error.LookupError:
671 667 return "%s@???" % self.path()
672 668
673 669 __str__ = encoding.strmethod(__bytes__)
674 670
675 671 def __repr__(self):
676 672 return r"<%s %s>" % (type(self).__name__, str(self))
677 673
678 674 def __hash__(self):
679 675 try:
680 676 return hash((self._path, self._filenode))
681 677 except AttributeError:
682 678 return id(self)
683 679
684 680 def __eq__(self, other):
685 681 try:
686 682 return (type(self) == type(other) and self._path == other._path
687 683 and self._filenode == other._filenode)
688 684 except AttributeError:
689 685 return False
690 686
691 687 def __ne__(self, other):
692 688 return not (self == other)
693 689
694 690 def filerev(self):
695 691 return self._filerev
696 692 def filenode(self):
697 693 return self._filenode
698 694 @propertycache
699 695 def _flags(self):
700 696 return self._changectx.flags(self._path)
701 697 def flags(self):
702 698 return self._flags
703 699 def filelog(self):
704 700 return self._filelog
705 701 def rev(self):
706 702 return self._changeid
707 703 def linkrev(self):
708 704 return self._filelog.linkrev(self._filerev)
709 705 def node(self):
710 706 return self._changectx.node()
711 707 def hex(self):
712 708 return self._changectx.hex()
713 709 def user(self):
714 710 return self._changectx.user()
715 711 def date(self):
716 712 return self._changectx.date()
717 713 def files(self):
718 714 return self._changectx.files()
719 715 def description(self):
720 716 return self._changectx.description()
721 717 def branch(self):
722 718 return self._changectx.branch()
723 719 def extra(self):
724 720 return self._changectx.extra()
725 721 def phase(self):
726 722 return self._changectx.phase()
727 723 def phasestr(self):
728 724 return self._changectx.phasestr()
729 725 def obsolete(self):
730 726 return self._changectx.obsolete()
731 727 def instabilities(self):
732 728 return self._changectx.instabilities()
733 729 def manifest(self):
734 730 return self._changectx.manifest()
735 731 def changectx(self):
736 732 return self._changectx
737 733 def renamed(self):
738 734 return self._copied
739 735 def repo(self):
740 736 return self._repo
741 737 def size(self):
742 738 return len(self.data())
743 739
744 740 def path(self):
745 741 return self._path
746 742
747 743 def isbinary(self):
748 744 try:
749 745 return stringutil.binary(self.data())
750 746 except IOError:
751 747 return False
752 748 def isexec(self):
753 749 return 'x' in self.flags()
754 750 def islink(self):
755 751 return 'l' in self.flags()
756 752
757 753 def isabsent(self):
758 754 """whether this filectx represents a file not in self._changectx
759 755
760 756 This is mainly for merge code to detect change/delete conflicts. This is
761 757 expected to be True for all subclasses of basectx."""
762 758 return False
763 759
764 760 _customcmp = False
765 761 def cmp(self, fctx):
766 762 """compare with other file context
767 763
768 764 returns True if different than fctx.
769 765 """
770 766 if fctx._customcmp:
771 767 return fctx.cmp(self)
772 768
773 769 if (fctx._filenode is None
774 770 and (self._repo._encodefilterpats
775 771 # if file data starts with '\1\n', empty metadata block is
776 772 # prepended, which adds 4 bytes to filelog.size().
777 773 or self.size() - 4 == fctx.size())
778 774 or self.size() == fctx.size()):
779 775 return self._filelog.cmp(self._filenode, fctx.data())
780 776
781 777 return True
782 778
783 779 def _adjustlinkrev(self, srcrev, inclusive=False):
784 780 """return the first ancestor of <srcrev> introducing <fnode>
785 781
786 782 If the linkrev of the file revision does not point to an ancestor of
787 783 srcrev, we'll walk down the ancestors until we find one introducing
788 784 this file revision.
789 785
790 786 :srcrev: the changeset revision we search ancestors from
791 787 :inclusive: if true, the src revision will also be checked
792 788 """
793 789 repo = self._repo
794 790 cl = repo.unfiltered().changelog
795 791 mfl = repo.manifestlog
796 792 # fetch the linkrev
797 793 lkr = self.linkrev()
798 794 # hack to reuse ancestor computation when searching for renames
799 795 memberanc = getattr(self, '_ancestrycontext', None)
800 796 iteranc = None
801 797 if srcrev is None:
802 798 # wctx case, used by workingfilectx during mergecopy
803 799 revs = [p.rev() for p in self._repo[None].parents()]
804 800 inclusive = True # we skipped the real (revless) source
805 801 else:
806 802 revs = [srcrev]
807 803 if memberanc is None:
808 804 memberanc = iteranc = cl.ancestors(revs, lkr,
809 805 inclusive=inclusive)
810 806 # check if this linkrev is an ancestor of srcrev
811 807 if lkr not in memberanc:
812 808 if iteranc is None:
813 809 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
814 810 fnode = self._filenode
815 811 path = self._path
816 812 for a in iteranc:
817 813 ac = cl.read(a) # get changeset data (we avoid object creation)
818 814 if path in ac[3]: # checking the 'files' field.
819 815 # The file has been touched, check if the content is
820 816 # similar to the one we search for.
821 817 if fnode == mfl[ac[0]].readfast().get(path):
822 818 return a
823 819 # In theory, we should never get out of that loop without a result.
824 820 # But if manifest uses a buggy file revision (not children of the
825 821 # one it replaces) we could. Such a buggy situation will likely
826 822 # result in a crash somewhere else at some point.
827 823 return lkr
828 824
829 825 def introrev(self):
830 826 """return the rev of the changeset which introduced this file revision
831 827
832 828 This method is different from linkrev because it takes into account the
833 829 changeset the filectx was created from. It ensures the returned
834 830 revision is one of its ancestors. This prevents bugs from
835 831 'linkrev-shadowing' when a file revision is used by multiple
836 832 changesets.
837 833 """
838 834 lkr = self.linkrev()
839 835 attrs = vars(self)
840 836 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
841 837 if noctx or self.rev() == lkr:
842 838 return self.linkrev()
843 839 return self._adjustlinkrev(self.rev(), inclusive=True)
844 840
845 841 def introfilectx(self):
846 842 """Return filectx having identical contents, but pointing to the
847 843 changeset revision where this filectx was introduced"""
848 844 introrev = self.introrev()
849 845 if self.rev() == introrev:
850 846 return self
851 847 return self.filectx(self.filenode(), changeid=introrev)
852 848
853 849 def _parentfilectx(self, path, fileid, filelog):
854 850 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
855 851 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
856 852 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
857 853 # If self is associated with a changeset (probably explicitly
858 854 # fed), ensure the created filectx is associated with a
859 855 # changeset that is an ancestor of self.changectx.
860 856 # This lets us later use _adjustlinkrev to get a correct link.
861 857 fctx._descendantrev = self.rev()
862 858 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
863 859 elif r'_descendantrev' in vars(self):
864 860 # Otherwise propagate _descendantrev if we have one associated.
865 861 fctx._descendantrev = self._descendantrev
866 862 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
867 863 return fctx
868 864
869 865 def parents(self):
870 866 _path = self._path
871 867 fl = self._filelog
872 868 parents = self._filelog.parents(self._filenode)
873 869 pl = [(_path, node, fl) for node in parents if node != nullid]
874 870
875 871 r = fl.renamed(self._filenode)
876 872 if r:
877 873 # - In the simple rename case, both parents are nullid, pl is empty.
878 874 # - In case of merge, only one of the parent is null id and should
879 875 # be replaced with the rename information. This parent is -always-
880 876 # the first one.
881 877 #
882 878 # As null ids have always been filtered out in the previous list
883 879 # comprehension, inserting at 0 will always result in replacing the
884 880 # first nullid parent with the rename information.
885 881 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
886 882
887 883 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
888 884
889 885 def p1(self):
890 886 return self.parents()[0]
891 887
892 888 def p2(self):
893 889 p = self.parents()
894 890 if len(p) == 2:
895 891 return p[1]
896 892 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
897 893
898 894 def annotate(self, follow=False, skiprevs=None, diffopts=None):
899 895 """Returns a list of annotateline objects for each line in the file
900 896
901 897 - line.fctx is the filectx of the node where that line was last changed
902 898 - line.lineno is the line number at the first appearance in the managed
903 899 file
904 900 - line.text is the data on that line (including newline character)
905 901 """
906 902 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
907 903
908 904 def parents(f):
909 905 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
910 906 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
911 907 # from the topmost introrev (= srcrev) down to p.linkrev() if it
912 908 # isn't an ancestor of the srcrev.
913 909 f._changeid
914 910 pl = f.parents()
915 911
916 912 # Don't return renamed parents if we aren't following.
917 913 if not follow:
918 914 pl = [p for p in pl if p.path() == f.path()]
919 915
920 916 # renamed filectx won't have a filelog yet, so set it
921 917 # from the cache to save time
922 918 for p in pl:
923 919 if not r'_filelog' in p.__dict__:
924 920 p._filelog = getlog(p.path())
925 921
926 922 return pl
927 923
928 924 # use linkrev to find the first changeset where self appeared
929 925 base = self.introfilectx()
930 926 if getattr(base, '_ancestrycontext', None) is None:
931 927 cl = self._repo.changelog
932 928 if base.rev() is None:
933 929 # wctx is not inclusive, but works because _ancestrycontext
934 930 # is used to test filelog revisions
935 931 ac = cl.ancestors([p.rev() for p in base.parents()],
936 932 inclusive=True)
937 933 else:
938 934 ac = cl.ancestors([base.rev()], inclusive=True)
939 935 base._ancestrycontext = ac
940 936
941 937 return dagop.annotate(base, parents, skiprevs=skiprevs,
942 938 diffopts=diffopts)
943 939
944 940 def ancestors(self, followfirst=False):
945 941 visit = {}
946 942 c = self
947 943 if followfirst:
948 944 cut = 1
949 945 else:
950 946 cut = None
951 947
952 948 while True:
953 949 for parent in c.parents()[:cut]:
954 950 visit[(parent.linkrev(), parent.filenode())] = parent
955 951 if not visit:
956 952 break
957 953 c = visit.pop(max(visit))
958 954 yield c
959 955
960 956 def decodeddata(self):
961 957 """Returns `data()` after running repository decoding filters.
962 958
963 959 This is often equivalent to how the data would be expressed on disk.
964 960 """
965 961 return self._repo.wwritedata(self.path(), self.data())
966 962
967 963 class filectx(basefilectx):
968 964 """A filecontext object makes access to data related to a particular
969 965 filerevision convenient."""
970 966 def __init__(self, repo, path, changeid=None, fileid=None,
971 967 filelog=None, changectx=None):
972 968 """changeid can be a changeset revision, node, or tag.
973 969 fileid can be a file revision or node."""
974 970 self._repo = repo
975 971 self._path = path
976 972
977 973 assert (changeid is not None
978 974 or fileid is not None
979 975 or changectx is not None), \
980 976 ("bad args: changeid=%r, fileid=%r, changectx=%r"
981 977 % (changeid, fileid, changectx))
982 978
983 979 if filelog is not None:
984 980 self._filelog = filelog
985 981
986 982 if changeid is not None:
987 983 self._changeid = changeid
988 984 if changectx is not None:
989 985 self._changectx = changectx
990 986 if fileid is not None:
991 987 self._fileid = fileid
992 988
993 989 @propertycache
994 990 def _changectx(self):
995 991 try:
996 992 return changectx(self._repo, self._changeid)
997 993 except error.FilteredRepoLookupError:
998 994 # Linkrev may point to any revision in the repository. When the
999 995 # repository is filtered this may lead to `filectx` trying to build
1000 996 # `changectx` for a filtered revision. In such a case we fall back to
1001 997 # creating `changectx` on the unfiltered version of the repository.
1002 998 # This fallback should not be an issue because `changectx` from
1003 999 # `filectx` are not used in complex operations that care about
1004 1000 # filtering.
1005 1001 #
1006 1002 # This fallback is a cheap and dirty fix that prevents several
1007 1003 # crashes. It does not ensure the behavior is correct. However the
1008 1004 # behavior was not correct before filtering either and "incorrect
1009 1005 # behavior" is seen as better than "crash"
1010 1006 #
1011 1007 # Linkrevs have several serious troubles with filtering that are
1012 1008 # complicated to solve. Proper handling of the issue here should be
1013 1009 # considered when solving the linkrev issues is on the table.
1014 1010 return changectx(self._repo.unfiltered(), self._changeid)
1015 1011
1016 1012 def filectx(self, fileid, changeid=None):
1017 1013 '''opens an arbitrary revision of the file without
1018 1014 opening a new filelog'''
1019 1015 return filectx(self._repo, self._path, fileid=fileid,
1020 1016 filelog=self._filelog, changeid=changeid)
1021 1017
1022 1018 def rawdata(self):
1023 1019 return self._filelog.revision(self._filenode, raw=True)
1024 1020
1025 1021 def rawflags(self):
1026 1022 """low-level revlog flags"""
1027 1023 return self._filelog.flags(self._filerev)
1028 1024
1029 1025 def data(self):
1030 1026 try:
1031 1027 return self._filelog.read(self._filenode)
1032 1028 except error.CensoredNodeError:
1033 1029 if self._repo.ui.config("censor", "policy") == "ignore":
1034 1030 return ""
1035 1031 raise error.Abort(_("censored node: %s") % short(self._filenode),
1036 1032 hint=_("set censor.policy to ignore errors"))
1037 1033
1038 1034 def size(self):
1039 1035 return self._filelog.size(self._filerev)
1040 1036
1041 1037 @propertycache
1042 1038 def _copied(self):
1043 1039 """check if file was actually renamed in this changeset revision
1044 1040
1045 1041 If a rename is logged in the file revision, we report a copy for the changeset only
1046 1042 if the file revision's linkrev points back to the changeset in question
1047 1043 or both changeset parents contain different file revisions.
1048 1044 """
1049 1045
1050 1046 renamed = self._filelog.renamed(self._filenode)
1051 1047 if not renamed:
1052 1048 return renamed
1053 1049
1054 1050 if self.rev() == self.linkrev():
1055 1051 return renamed
1056 1052
1057 1053 name = self.path()
1058 1054 fnode = self._filenode
1059 1055 for p in self._changectx.parents():
1060 1056 try:
1061 1057 if fnode == p.filenode(name):
1062 1058 return None
1063 1059 except error.LookupError:
1064 1060 pass
1065 1061 return renamed
1066 1062
1067 1063 def children(self):
1068 1064 # hard for renames
1069 1065 c = self._filelog.children(self._filenode)
1070 1066 return [filectx(self._repo, self._path, fileid=x,
1071 1067 filelog=self._filelog) for x in c]
1072 1068
1073 1069 class committablectx(basectx):
1074 1070 """A committablectx object provides common functionality for a context that
1075 1071 wants the ability to commit, e.g. workingctx or memctx."""
1076 1072 def __init__(self, repo, text="", user=None, date=None, extra=None,
1077 1073 changes=None):
1078 1074 super(committablectx, self).__init__(repo)
1079 1075 self._rev = None
1080 1076 self._node = None
1081 1077 self._text = text
1082 1078 if date:
1083 1079 self._date = dateutil.parsedate(date)
1084 1080 if user:
1085 1081 self._user = user
1086 1082 if changes:
1087 1083 self._status = changes
1088 1084
1089 1085 self._extra = {}
1090 1086 if extra:
1091 1087 self._extra = extra.copy()
1092 1088 if 'branch' not in self._extra:
1093 1089 try:
1094 1090 branch = encoding.fromlocal(self._repo.dirstate.branch())
1095 1091 except UnicodeDecodeError:
1096 1092 raise error.Abort(_('branch name not in UTF-8!'))
1097 1093 self._extra['branch'] = branch
1098 1094 if self._extra['branch'] == '':
1099 1095 self._extra['branch'] = 'default'
1100 1096
1101 1097 def __bytes__(self):
1102 1098 return bytes(self._parents[0]) + "+"
1103 1099
1104 1100 __str__ = encoding.strmethod(__bytes__)
1105 1101
1106 1102 def __nonzero__(self):
1107 1103 return True
1108 1104
1109 1105 __bool__ = __nonzero__
1110 1106
1111 1107 def _buildflagfunc(self):
1112 1108 # Create a fallback function for getting file flags when the
1113 1109 # filesystem doesn't support them
1114 1110
1115 1111 copiesget = self._repo.dirstate.copies().get
1116 1112 parents = self.parents()
1117 1113 if len(parents) < 2:
1118 1114 # when we have one parent, it's easy: copy from parent
1119 1115 man = parents[0].manifest()
1120 1116 def func(f):
1121 1117 f = copiesget(f, f)
1122 1118 return man.flags(f)
1123 1119 else:
1124 1120 # merges are tricky: we try to reconstruct the unstored
1125 1121 # result from the merge (issue1802)
1126 1122 p1, p2 = parents
1127 1123 pa = p1.ancestor(p2)
1128 1124 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1129 1125
1130 1126 def func(f):
1131 1127 f = copiesget(f, f) # may be wrong for merges with copies
1132 1128 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1133 1129 if fl1 == fl2:
1134 1130 return fl1
1135 1131 if fl1 == fla:
1136 1132 return fl2
1137 1133 if fl2 == fla:
1138 1134 return fl1
1139 1135 return '' # punt for conflicts
1140 1136
1141 1137 return func
1142 1138
1143 1139 @propertycache
1144 1140 def _flagfunc(self):
1145 1141 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1146 1142
1147 1143 @propertycache
1148 1144 def _status(self):
1149 1145 return self._repo.status()
1150 1146
1151 1147 @propertycache
1152 1148 def _user(self):
1153 1149 return self._repo.ui.username()
1154 1150
1155 1151 @propertycache
1156 1152 def _date(self):
1157 1153 ui = self._repo.ui
1158 1154 date = ui.configdate('devel', 'default-date')
1159 1155 if date is None:
1160 1156 date = dateutil.makedate()
1161 1157 return date
1162 1158
1163 1159 def subrev(self, subpath):
1164 1160 return None
1165 1161
1166 1162 def manifestnode(self):
1167 1163 return None
1168 1164 def user(self):
1169 1165 return self._user or self._repo.ui.username()
1170 1166 def date(self):
1171 1167 return self._date
1172 1168 def description(self):
1173 1169 return self._text
1174 1170 def files(self):
1175 1171 return sorted(self._status.modified + self._status.added +
1176 1172 self._status.removed)
1177 1173
1178 1174 def modified(self):
1179 1175 return self._status.modified
1180 1176 def added(self):
1181 1177 return self._status.added
1182 1178 def removed(self):
1183 1179 return self._status.removed
1184 1180 def deleted(self):
1185 1181 return self._status.deleted
1186 1182 def branch(self):
1187 1183 return encoding.tolocal(self._extra['branch'])
1188 1184 def closesbranch(self):
1189 1185 return 'close' in self._extra
1190 1186 def extra(self):
1191 1187 return self._extra
1192 1188
1193 1189 def isinmemory(self):
1194 1190 return False
1195 1191
1196 1192 def tags(self):
1197 1193 return []
1198 1194
1199 1195 def bookmarks(self):
1200 1196 b = []
1201 1197 for p in self.parents():
1202 1198 b.extend(p.bookmarks())
1203 1199 return b
1204 1200
1205 1201 def phase(self):
1206 1202 phase = phases.draft # default phase to draft
1207 1203 for p in self.parents():
1208 1204 phase = max(phase, p.phase())
1209 1205 return phase
1210 1206
1211 1207 def hidden(self):
1212 1208 return False
1213 1209
1214 1210 def children(self):
1215 1211 return []
1216 1212
1217 1213 def flags(self, path):
1218 1214 if r'_manifest' in self.__dict__:
1219 1215 try:
1220 1216 return self._manifest.flags(path)
1221 1217 except KeyError:
1222 1218 return ''
1223 1219
1224 1220 try:
1225 1221 return self._flagfunc(path)
1226 1222 except OSError:
1227 1223 return ''
1228 1224
1229 1225 def ancestor(self, c2):
1230 1226 """return the "best" ancestor context of self and c2"""
1231 1227 return self._parents[0].ancestor(c2) # punt on two parents for now
1232 1228
1233 1229 def walk(self, match):
1234 1230 '''Generates matching file names.'''
1235 1231 return sorted(self._repo.dirstate.walk(match,
1236 1232 subrepos=sorted(self.substate),
1237 1233 unknown=True, ignored=False))
1238 1234
1239 1235 def matches(self, match):
1240 1236 ds = self._repo.dirstate
1241 1237 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1242 1238
1243 1239 def ancestors(self):
1244 1240 for p in self._parents:
1245 1241 yield p
1246 1242 for a in self._repo.changelog.ancestors(
1247 1243 [p.rev() for p in self._parents]):
1248 1244 yield changectx(self._repo, a)
1249 1245
1250 1246 def markcommitted(self, node):
1251 1247 """Perform post-commit cleanup necessary after committing this ctx
1252 1248
1253 1249 Specifically, this updates backing stores this working context
1254 1250 wraps to reflect the fact that the changes reflected by this
1255 1251 workingctx have been committed. For example, it marks
1256 1252 modified and added files as normal in the dirstate.
1257 1253
1258 1254 """
1259 1255
1260 1256 with self._repo.dirstate.parentchange():
1261 1257 for f in self.modified() + self.added():
1262 1258 self._repo.dirstate.normal(f)
1263 1259 for f in self.removed():
1264 1260 self._repo.dirstate.drop(f)
1265 1261 self._repo.dirstate.setparents(node)
1266 1262
1267 1263 # write changes out explicitly, because nesting wlock at
1268 1264 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1269 1265 # from immediately doing so for subsequent changing files
1270 1266 self._repo.dirstate.write(self._repo.currenttransaction())
1271 1267
1272 1268 def dirty(self, missing=False, merge=True, branch=True):
1273 1269 return False
1274 1270
1275 1271 class workingctx(committablectx):
1276 1272 """A workingctx object makes access to data related to
1277 1273 the current working directory convenient.
1278 1274 date - any valid date string or (unixtime, offset), or None.
1279 1275 user - username string, or None.
1280 1276 extra - a dictionary of extra values, or None.
1281 1277 changes - a list of file lists as returned by localrepo.status()
1282 1278 or None to use the repository status.
1283 1279 """
1284 1280 def __init__(self, repo, text="", user=None, date=None, extra=None,
1285 1281 changes=None):
1286 1282 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1287 1283
1288 1284 def __iter__(self):
1289 1285 d = self._repo.dirstate
1290 1286 for f in d:
1291 1287 if d[f] != 'r':
1292 1288 yield f
1293 1289
1294 1290 def __contains__(self, key):
1295 1291 return self._repo.dirstate[key] not in "?r"
1296 1292
1297 1293 def hex(self):
1298 1294 return hex(wdirid)
1299 1295
1300 1296 @propertycache
1301 1297 def _parents(self):
1302 1298 p = self._repo.dirstate.parents()
1303 1299 if p[1] == nullid:
1304 1300 p = p[:-1]
1305 1301 return [changectx(self._repo, x) for x in p]
1306 1302
1307 1303 def _fileinfo(self, path):
1308 1304 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1309 1305 self._manifest
1310 1306 return super(workingctx, self)._fileinfo(path)
1311 1307
1312 1308 def filectx(self, path, filelog=None):
1313 1309 """get a file context from the working directory"""
1314 1310 return workingfilectx(self._repo, path, workingctx=self,
1315 1311 filelog=filelog)
1316 1312
1317 1313 def dirty(self, missing=False, merge=True, branch=True):
1318 1314 "check whether a working directory is modified"
1319 1315 # check subrepos first
1320 1316 for s in sorted(self.substate):
1321 1317 if self.sub(s).dirty(missing=missing):
1322 1318 return True
1323 1319 # check current working dir
1324 1320 return ((merge and self.p2()) or
1325 1321 (branch and self.branch() != self.p1().branch()) or
1326 1322 self.modified() or self.added() or self.removed() or
1327 1323 (missing and self.deleted()))
1328 1324
1329 1325 def add(self, list, prefix=""):
1330 1326 with self._repo.wlock():
1331 1327 ui, ds = self._repo.ui, self._repo.dirstate
1332 1328 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1333 1329 rejected = []
1334 1330 lstat = self._repo.wvfs.lstat
1335 1331 for f in list:
1336 1332 # ds.pathto() returns an absolute file when this is invoked from
1337 1333 # the keyword extension. That gets flagged as non-portable on
1338 1334 # Windows, since it contains the drive letter and colon.
1339 1335 scmutil.checkportable(ui, os.path.join(prefix, f))
1340 1336 try:
1341 1337 st = lstat(f)
1342 1338 except OSError:
1343 1339 ui.warn(_("%s does not exist!\n") % uipath(f))
1344 1340 rejected.append(f)
1345 1341 continue
1346 1342 if st.st_size > 10000000:
1347 1343 ui.warn(_("%s: up to %d MB of RAM may be required "
1348 1344 "to manage this file\n"
1349 1345 "(use 'hg revert %s' to cancel the "
1350 1346 "pending addition)\n")
1351 1347 % (f, 3 * st.st_size // 1000000, uipath(f)))
1352 1348 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1353 1349 ui.warn(_("%s not added: only files and symlinks "
1354 1350 "supported currently\n") % uipath(f))
1355 1351 rejected.append(f)
1356 1352 elif ds[f] in 'amn':
1357 1353 ui.warn(_("%s already tracked!\n") % uipath(f))
1358 1354 elif ds[f] == 'r':
1359 1355 ds.normallookup(f)
1360 1356 else:
1361 1357 ds.add(f)
1362 1358 return rejected
1363 1359
1364 1360 def forget(self, files, prefix=""):
1365 1361 with self._repo.wlock():
1366 1362 ds = self._repo.dirstate
1367 1363 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1368 1364 rejected = []
1369 1365 for f in files:
1370 1366 if f not in self._repo.dirstate:
1371 1367 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1372 1368 rejected.append(f)
1373 1369 elif self._repo.dirstate[f] != 'a':
1374 1370 self._repo.dirstate.remove(f)
1375 1371 else:
1376 1372 self._repo.dirstate.drop(f)
1377 1373 return rejected
1378 1374
1379 1375 def undelete(self, list):
1380 1376 pctxs = self.parents()
1381 1377 with self._repo.wlock():
1382 1378 ds = self._repo.dirstate
1383 1379 for f in list:
1384 1380 if self._repo.dirstate[f] != 'r':
1385 1381 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1386 1382 else:
1387 1383 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1388 1384 t = fctx.data()
1389 1385 self._repo.wwrite(f, t, fctx.flags())
1390 1386 self._repo.dirstate.normal(f)
1391 1387
1392 1388 def copy(self, source, dest):
1393 1389 try:
1394 1390 st = self._repo.wvfs.lstat(dest)
1395 1391 except OSError as err:
1396 1392 if err.errno != errno.ENOENT:
1397 1393 raise
1398 1394 self._repo.ui.warn(_("%s does not exist!\n")
1399 1395 % self._repo.dirstate.pathto(dest))
1400 1396 return
1401 1397 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1402 1398 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1403 1399 "symbolic link\n")
1404 1400 % self._repo.dirstate.pathto(dest))
1405 1401 else:
1406 1402 with self._repo.wlock():
1407 1403 if self._repo.dirstate[dest] in '?':
1408 1404 self._repo.dirstate.add(dest)
1409 1405 elif self._repo.dirstate[dest] in 'r':
1410 1406 self._repo.dirstate.normallookup(dest)
1411 1407 self._repo.dirstate.copy(source, dest)
1412 1408
1413 1409 def match(self, pats=None, include=None, exclude=None, default='glob',
1414 1410 listsubrepos=False, badfn=None):
1415 1411 r = self._repo
1416 1412
1417 1413 # Only a case insensitive filesystem needs magic to translate user input
1418 1414 # to actual case in the filesystem.
1419 1415 icasefs = not util.fscasesensitive(r.root)
1420 1416 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1421 1417 default, auditor=r.auditor, ctx=self,
1422 1418 listsubrepos=listsubrepos, badfn=badfn,
1423 1419 icasefs=icasefs)
1424 1420
1425 1421 def _filtersuspectsymlink(self, files):
1426 1422 if not files or self._repo.dirstate._checklink:
1427 1423 return files
1428 1424
1429 1425 # Symlink placeholders may get non-symlink-like contents
1430 1426 # via user error or dereferencing by NFS or Samba servers,
1431 1427 # so we filter out any placeholders that don't look like a
1432 1428 # symlink
1433 1429 sane = []
1434 1430 for f in files:
1435 1431 if self.flags(f) == 'l':
1436 1432 d = self[f].data()
1437 1433 if (d == '' or len(d) >= 1024 or '\n' in d
1438 1434 or stringutil.binary(d)):
1439 1435 self._repo.ui.debug('ignoring suspect symlink placeholder'
1440 1436 ' "%s"\n' % f)
1441 1437 continue
1442 1438 sane.append(f)
1443 1439 return sane
1444 1440
1445 1441 def _checklookup(self, files):
1446 1442 # check for any possibly clean files
1447 1443 if not files:
1448 1444 return [], [], []
1449 1445
1450 1446 modified = []
1451 1447 deleted = []
1452 1448 fixup = []
1453 1449 pctx = self._parents[0]
1454 1450 # do a full compare of any files that might have changed
1455 1451 for f in sorted(files):
1456 1452 try:
1457 1453 # This will return True for a file that got replaced by a
1458 1454 # directory in the interim, but fixing that is pretty hard.
1459 1455 if (f not in pctx or self.flags(f) != pctx.flags(f)
1460 1456 or pctx[f].cmp(self[f])):
1461 1457 modified.append(f)
1462 1458 else:
1463 1459 fixup.append(f)
1464 1460 except (IOError, OSError):
1465 1461 # A file became inaccessible in between? Mark it as deleted,
1466 1462 # matching dirstate behavior (issue5584).
1467 1463 # The dirstate has more complex behavior around whether a
1468 1464 # missing file matches a directory, etc, but we don't need to
1469 1465 # bother with that: if f has made it to this point, we're sure
1470 1466 # it's in the dirstate.
1471 1467 deleted.append(f)
1472 1468
1473 1469 return modified, deleted, fixup
1474 1470
1475 1471 def _poststatusfixup(self, status, fixup):
1476 1472 """update dirstate for files that are actually clean"""
1477 1473 poststatus = self._repo.postdsstatus()
1478 1474 if fixup or poststatus:
1479 1475 try:
1480 1476 oldid = self._repo.dirstate.identity()
1481 1477
1482 1478 # updating the dirstate is optional
1483 1479 # so we don't wait on the lock
1484 1480 # wlock can invalidate the dirstate, so cache normal _after_
1485 1481 # taking the lock
1486 1482 with self._repo.wlock(False):
1487 1483 if self._repo.dirstate.identity() == oldid:
1488 1484 if fixup:
1489 1485 normal = self._repo.dirstate.normal
1490 1486 for f in fixup:
1491 1487 normal(f)
1492 1488 # write changes out explicitly, because nesting
1493 1489 # wlock at runtime may prevent 'wlock.release()'
1494 1490 # after this block from doing so for subsequent
1495 1491 # changing files
1496 1492 tr = self._repo.currenttransaction()
1497 1493 self._repo.dirstate.write(tr)
1498 1494
1499 1495 if poststatus:
1500 1496 for ps in poststatus:
1501 1497 ps(self, status)
1502 1498 else:
1503 1499 # in this case, writing changes out breaks
1504 1500 # consistency, because .hg/dirstate was
1505 1501 # already changed simultaneously after last
1506 1502 # caching (see also issue5584 for detail)
1507 1503 self._repo.ui.debug('skip updating dirstate: '
1508 1504 'identity mismatch\n')
1509 1505 except error.LockError:
1510 1506 pass
1511 1507 finally:
1512 1508 # Even if the wlock couldn't be grabbed, clear out the list.
1513 1509 self._repo.clearpostdsstatus()
1514 1510
1515 1511 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1516 1512 '''Gets the status from the dirstate -- internal use only.'''
1517 1513 subrepos = []
1518 1514 if '.hgsub' in self:
1519 1515 subrepos = sorted(self.substate)
1520 1516 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1521 1517 clean=clean, unknown=unknown)
1522 1518
1523 1519 # check for any possibly clean files
1524 1520 fixup = []
1525 1521 if cmp:
1526 1522 modified2, deleted2, fixup = self._checklookup(cmp)
1527 1523 s.modified.extend(modified2)
1528 1524 s.deleted.extend(deleted2)
1529 1525
1530 1526 if fixup and clean:
1531 1527 s.clean.extend(fixup)
1532 1528
1533 1529 self._poststatusfixup(s, fixup)
1534 1530
1535 1531 if match.always():
1536 1532 # cache for performance
1537 1533 if s.unknown or s.ignored or s.clean:
1538 1534 # "_status" is cached with list*=False in the normal route
1539 1535 self._status = scmutil.status(s.modified, s.added, s.removed,
1540 1536 s.deleted, [], [], [])
1541 1537 else:
1542 1538 self._status = s
1543 1539
1544 1540 return s
1545 1541
1546 1542 @propertycache
1547 1543 def _manifest(self):
1548 1544 """generate a manifest corresponding to the values in self._status
1549 1545
1550 1546 This reuses the file nodeid from the parent, but we use special node
1551 1547 identifiers for added and modified files. This is used by manifest
1552 1548 merge to see that files are different and by update logic to avoid
1553 1549 deleting newly added files.
1554 1550 """
1555 1551 return self._buildstatusmanifest(self._status)
1556 1552
1557 1553 def _buildstatusmanifest(self, status):
1558 1554 """Builds a manifest that includes the given status results."""
1559 1555 parents = self.parents()
1560 1556
1561 1557 man = parents[0].manifest().copy()
1562 1558
1563 1559 ff = self._flagfunc
1564 1560 for i, l in ((addednodeid, status.added),
1565 1561 (modifiednodeid, status.modified)):
1566 1562 for f in l:
1567 1563 man[f] = i
1568 1564 try:
1569 1565 man.setflag(f, ff(f))
1570 1566 except OSError:
1571 1567 pass
1572 1568
1573 1569 for f in status.deleted + status.removed:
1574 1570 if f in man:
1575 1571 del man[f]
1576 1572
1577 1573 return man
1578 1574
1579 1575 def _buildstatus(self, other, s, match, listignored, listclean,
1580 1576 listunknown):
1581 1577 """build a status with respect to another context
1582 1578
1583 1579 This includes logic for maintaining the fast path of status when
1584 1580 comparing the working directory against its parent, which is to skip
1585 1581 building a new manifest if self (working directory) is not comparing
1586 1582 against its parent (repo['.']).
1587 1583 """
1588 1584 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1589 1585 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1590 1586 # might have accidentally ended up with the entire contents of the file
1591 1587 # they are supposed to be linking to.
1592 1588 s.modified[:] = self._filtersuspectsymlink(s.modified)
1593 1589 if other != self._repo['.']:
1594 1590 s = super(workingctx, self)._buildstatus(other, s, match,
1595 1591 listignored, listclean,
1596 1592 listunknown)
1597 1593 return s
1598 1594
1599 1595 def _matchstatus(self, other, match):
1600 1596 """override the match method with a filter for directory patterns
1601 1597
1602 1598 We use inheritance to customize the match.bad method only in cases of
1603 1599 workingctx since it belongs only to the working directory when
1604 1600 comparing against the parent changeset.
1605 1601
1606 1602 If we aren't comparing against the working directory's parent, then we
1607 1603 just use the default match object sent to us.
1608 1604 """
1609 1605 if other != self._repo['.']:
1610 1606 def bad(f, msg):
1611 1607 # 'f' may be a directory pattern from 'match.files()',
1612 1608 # so 'f not in ctx1' is not enough
1613 1609 if f not in other and not other.hasdir(f):
1614 1610 self._repo.ui.warn('%s: %s\n' %
1615 1611 (self._repo.dirstate.pathto(f), msg))
1616 1612 match.bad = bad
1617 1613 return match
1618 1614
1619 1615 def markcommitted(self, node):
1620 1616 super(workingctx, self).markcommitted(node)
1621 1617
1622 1618 sparse.aftercommit(self._repo, node)
1623 1619
1624 1620 class committablefilectx(basefilectx):
1625 1621 """A committablefilectx provides common functionality for a file context
1626 1622 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1627 1623 def __init__(self, repo, path, filelog=None, ctx=None):
1628 1624 self._repo = repo
1629 1625 self._path = path
1630 1626 self._changeid = None
1631 1627 self._filerev = self._filenode = None
1632 1628
1633 1629 if filelog is not None:
1634 1630 self._filelog = filelog
1635 1631 if ctx:
1636 1632 self._changectx = ctx
1637 1633
1638 1634 def __nonzero__(self):
1639 1635 return True
1640 1636
1641 1637 __bool__ = __nonzero__
1642 1638
1643 1639 def linkrev(self):
1644 1640 # linked to self._changectx no matter if file is modified or not
1645 1641 return self.rev()
1646 1642
1647 1643 def parents(self):
1648 1644 '''return parent filectxs, following copies if necessary'''
1649 1645 def filenode(ctx, path):
1650 1646 return ctx._manifest.get(path, nullid)
1651 1647
1652 1648 path = self._path
1653 1649 fl = self._filelog
1654 1650 pcl = self._changectx._parents
1655 1651 renamed = self.renamed()
1656 1652
1657 1653 if renamed:
1658 1654 pl = [renamed + (None,)]
1659 1655 else:
1660 1656 pl = [(path, filenode(pcl[0], path), fl)]
1661 1657
1662 1658 for pc in pcl[1:]:
1663 1659 pl.append((path, filenode(pc, path), fl))
1664 1660
1665 1661 return [self._parentfilectx(p, fileid=n, filelog=l)
1666 1662 for p, n, l in pl if n != nullid]
1667 1663
1668 1664 def children(self):
1669 1665 return []
1670 1666
1671 1667 class workingfilectx(committablefilectx):
1672 1668 """A workingfilectx object makes access to data related to a particular
1673 1669 file in the working directory convenient."""
1674 1670 def __init__(self, repo, path, filelog=None, workingctx=None):
1675 1671 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1676 1672
1677 1673 @propertycache
1678 1674 def _changectx(self):
1679 1675 return workingctx(self._repo)
1680 1676
1681 1677 def data(self):
1682 1678 return self._repo.wread(self._path)
1683 1679 def renamed(self):
1684 1680 rp = self._repo.dirstate.copied(self._path)
1685 1681 if not rp:
1686 1682 return None
1687 1683 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1688 1684
1689 1685 def size(self):
1690 1686 return self._repo.wvfs.lstat(self._path).st_size
1691 1687 def date(self):
1692 1688 t, tz = self._changectx.date()
1693 1689 try:
1694 1690 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1695 1691 except OSError as err:
1696 1692 if err.errno != errno.ENOENT:
1697 1693 raise
1698 1694 return (t, tz)
1699 1695
1700 1696 def exists(self):
1701 1697 return self._repo.wvfs.exists(self._path)
1702 1698
1703 1699 def lexists(self):
1704 1700 return self._repo.wvfs.lexists(self._path)
1705 1701
1706 1702 def audit(self):
1707 1703 return self._repo.wvfs.audit(self._path)
1708 1704
1709 1705 def cmp(self, fctx):
1710 1706 """compare with other file context
1711 1707
1712 1708 returns True if different than fctx.
1713 1709 """
1714 1710 # fctx should be a filectx (not a workingfilectx)
1715 1711 # invert comparison to reuse the same code path
1716 1712 return fctx.cmp(self)
1717 1713
1718 1714 def remove(self, ignoremissing=False):
1719 1715 """wraps unlink for a repo's working directory"""
1720 1716 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1721 1717 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1722 1718 rmdir=rmdir)
1723 1719
1724 1720 def write(self, data, flags, backgroundclose=False, **kwargs):
1725 1721 """wraps repo.wwrite"""
1726 1722 self._repo.wwrite(self._path, data, flags,
1727 1723 backgroundclose=backgroundclose,
1728 1724 **kwargs)
1729 1725
1730 1726 def markcopied(self, src):
1731 1727 """marks this file a copy of `src`"""
1732 1728 if self._repo.dirstate[self._path] in "nma":
1733 1729 self._repo.dirstate.copy(src, self._path)
1734 1730
1735 1731 def clearunknown(self):
1736 1732 """Removes conflicting items in the working directory so that
1737 1733 ``write()`` can be called successfully.
1738 1734 """
1739 1735 wvfs = self._repo.wvfs
1740 1736 f = self._path
1741 1737 wvfs.audit(f)
1742 1738 if wvfs.isdir(f) and not wvfs.islink(f):
1743 1739 wvfs.rmtree(f, forcibly=True)
1744 1740 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1745 1741 for p in reversed(list(util.finddirs(f))):
1746 1742 if wvfs.isfileorlink(p):
1747 1743 wvfs.unlink(p)
1748 1744 break
1749 1745
1750 1746 def setflags(self, l, x):
1751 1747 self._repo.wvfs.setflags(self._path, l, x)
1752 1748
1753 1749 class overlayworkingctx(committablectx):
1754 1750 """Wraps another mutable context with a write-back cache that can be
1755 1751 converted into a commit context.
1756 1752
1757 1753 self._cache[path] maps to a dict with keys: {
1758 1754 'exists': bool?
1759 1755 'date': date?
1760 1756 'data': str?
1761 1757 'flags': str?
1762 1758 'copied': str? (path or None)
1763 1759 }
1764 1760 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1765 1761 is `False`, the file was deleted.
1766 1762 """
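# Illustrative cache entry (a hedged sketch, not literal output of this class;
# 'foo.txt' and its contents are made up):
#
#   self._cache['foo.txt'] = {
#       'exists': True,
#       'data': 'new contents\n',
#       'date': dateutil.makedate(),
#       'flags': '',
#       'copied': None,
#   }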
1767 1763
1768 1764 def __init__(self, repo):
1769 1765 super(overlayworkingctx, self).__init__(repo)
1770 1766 self.clean()
1771 1767
1772 1768 def setbase(self, wrappedctx):
1773 1769 self._wrappedctx = wrappedctx
1774 1770 self._parents = [wrappedctx]
1775 1771 # Drop old manifest cache as it is now out of date.
1776 1772 # This is necessary when, e.g., rebasing several nodes with one
1777 1773 # ``overlayworkingctx`` (e.g. with --collapse).
1778 1774 util.clearcachedproperty(self, '_manifest')
1779 1775
1780 1776 def data(self, path):
1781 1777 if self.isdirty(path):
1782 1778 if self._cache[path]['exists']:
1783 1779 if self._cache[path]['data']:
1784 1780 return self._cache[path]['data']
1785 1781 else:
1786 1782 # Must fallback here, too, because we only set flags.
1787 1783 return self._wrappedctx[path].data()
1788 1784 else:
1789 1785 raise error.ProgrammingError("No such file or directory: %s" %
1790 1786 path)
1791 1787 else:
1792 1788 return self._wrappedctx[path].data()
1793 1789
1794 1790 @propertycache
1795 1791 def _manifest(self):
1796 1792 parents = self.parents()
1797 1793 man = parents[0].manifest().copy()
1798 1794
1799 1795 flag = self._flagfunc
1800 1796 for path in self.added():
1801 1797 man[path] = addednodeid
1802 1798 man.setflag(path, flag(path))
1803 1799 for path in self.modified():
1804 1800 man[path] = modifiednodeid
1805 1801 man.setflag(path, flag(path))
1806 1802 for path in self.removed():
1807 1803 del man[path]
1808 1804 return man
1809 1805
1810 1806 @propertycache
1811 1807 def _flagfunc(self):
1812 1808 def f(path):
1813 1809 return self._cache[path]['flags']
1814 1810 return f
1815 1811
1816 1812 def files(self):
1817 1813 return sorted(self.added() + self.modified() + self.removed())
1818 1814
1819 1815 def modified(self):
1820 1816 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1821 1817 self._existsinparent(f)]
1822 1818
1823 1819 def added(self):
1824 1820 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1825 1821 not self._existsinparent(f)]
1826 1822
1827 1823 def removed(self):
1828 1824 return [f for f in self._cache.keys() if
1829 1825 not self._cache[f]['exists'] and self._existsinparent(f)]
1830 1826
1831 1827 def isinmemory(self):
1832 1828 return True
1833 1829
1834 1830 def filedate(self, path):
1835 1831 if self.isdirty(path):
1836 1832 return self._cache[path]['date']
1837 1833 else:
1838 1834 return self._wrappedctx[path].date()
1839 1835
1840 1836 def markcopied(self, path, origin):
1841 1837 if self.isdirty(path):
1842 1838 self._cache[path]['copied'] = origin
1843 1839 else:
1844 1840 raise error.ProgrammingError('markcopied() called on clean context')
1845 1841
1846 1842 def copydata(self, path):
1847 1843 if self.isdirty(path):
1848 1844 return self._cache[path]['copied']
1849 1845 else:
1850 1846 raise error.ProgrammingError('copydata() called on clean context')
1851 1847
1852 1848 def flags(self, path):
1853 1849 if self.isdirty(path):
1854 1850 if self._cache[path]['exists']:
1855 1851 return self._cache[path]['flags']
1856 1852 else:
1857 1853 raise error.ProgrammingError("No such file or directory: %s" %
1858 1854 path)
1859 1855 else:
1860 1856 return self._wrappedctx[path].flags()
1861 1857
1862 1858 def _existsinparent(self, path):
1863 1859 try:
1864 1860 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1865 1861 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1866 1862 # with an ``exists()`` function.
1867 1863 self._wrappedctx[path]
1868 1864 return True
1869 1865 except error.ManifestLookupError:
1870 1866 return False
1871 1867
1872 1868 def _auditconflicts(self, path):
1873 1869 """Replicates conflict checks done by wvfs.write().
1874 1870
1875 1871 Since we never write to the filesystem and never call `applyupdates` in
1876 1872 IMM, we'll never check that a path is actually writable -- e.g., because
1877 1873 it adds `a/foo`, but `a` is actually a file in the other commit.
1878 1874 """
1879 1875 def fail(path, component):
1880 1876 # p1() is the base and we're receiving "writes" for p2()'s
1881 1877 # files.
1882 1878 if 'l' in self.p1()[component].flags():
1883 1879 raise error.Abort("error: %s conflicts with symlink %s "
1884 1880 "in %s." % (path, component,
1885 1881 self.p1().rev()))
1886 1882 else:
1887 1883 raise error.Abort("error: '%s' conflicts with file '%s' in "
1888 1884 "%s." % (path, component,
1889 1885 self.p1().rev()))
1890 1886
1891 1887 # Test that each new directory to be created to write this path from p2
1892 1888 # is not a file in p1.
1893 1889 components = path.split('/')
1894 1890 for i in xrange(len(components)):
1895 1891 component = "/".join(components[0:i])
1896 1892 if component in self.p1():
1897 1893 fail(path, component)
1898 1894
1899 1895 # Test the other direction -- that this path from p2 isn't a directory
1900 1896 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1901 1897 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1902 1898 matches = self.p1().manifest().matches(match)
1903 1899 if len(matches) > 0:
1904 1900 if len(matches) == 1 and matches.keys()[0] == path:
1905 1901 return
1906 1902 raise error.Abort("error: file '%s' cannot be written because "
1907 1903 " '%s/' is a folder in %s (containing %d "
1908 1904 "entries: %s)"
1909 1905 % (path, path, self.p1(), len(matches),
1910 1906 ', '.join(matches.keys())))
1911 1907
1912 1908 def write(self, path, data, flags='', **kwargs):
1913 1909 if data is None:
1914 1910 raise error.ProgrammingError("data must be non-None")
1915 1911 self._auditconflicts(path)
1916 1912 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1917 1913 flags=flags)
1918 1914
1919 1915 def setflags(self, path, l, x):
1920 1916 self._markdirty(path, exists=True, date=dateutil.makedate(),
1921 1917 flags=(l and 'l' or '') + (x and 'x' or ''))
1922 1918
1923 1919 def remove(self, path):
1924 1920 self._markdirty(path, exists=False)
1925 1921
1926 1922 def exists(self, path):
1927 1923 """exists behaves like `lexists`, but needs to follow symlinks and
1928 1924 return False if they are broken.
1929 1925 """
1930 1926 if self.isdirty(path):
1931 1927 # If this path exists and is a symlink, "follow" it by calling
1932 1928 # exists on the destination path.
1933 1929 if (self._cache[path]['exists'] and
1934 1930 'l' in self._cache[path]['flags']):
1935 1931 return self.exists(self._cache[path]['data'].strip())
1936 1932 else:
1937 1933 return self._cache[path]['exists']
1938 1934
1939 1935 return self._existsinparent(path)
1940 1936
1941 1937 def lexists(self, path):
1942 1938 """lexists returns True if the path exists"""
1943 1939 if self.isdirty(path):
1944 1940 return self._cache[path]['exists']
1945 1941
1946 1942 return self._existsinparent(path)
1947 1943
1948 1944 def size(self, path):
1949 1945 if self.isdirty(path):
1950 1946 if self._cache[path]['exists']:
1951 1947 return len(self._cache[path]['data'])
1952 1948 else:
1953 1949 raise error.ProgrammingError("No such file or directory: %s" %
1954 1950 path)
1955 1951 return self._wrappedctx[path].size()
1956 1952
1957 1953 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1958 1954 user=None, editor=None):
1959 1955 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1960 1956 committed.
1961 1957
1962 1958 ``text`` is the commit message.
1963 1959 ``parents`` (optional) are rev numbers.
1964 1960 """
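# A hedged usage sketch (not part of this module): in-memory merge/rebase code
# typically finalizes an overlay roughly like this ('wctx' and 'destrev' are
# made-up names; parents are rev numbers as described above):
#
#   mctx = wctx.tomemctx('collapsed commit', parents=(destrev, None))
#   newnode = repo.commitctx(mctx)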
1965 1961 # Default parents to the wrapped contexts' if not passed.
1966 1962 if parents is None:
1967 1963 parents = self._wrappedctx.parents()
1968 1964 if len(parents) == 1:
1969 1965 parents = (parents[0], None)
1970 1966
1971 1967 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1972 1968 if parents[1] is None:
1973 1969 parents = (self._repo[parents[0]], None)
1974 1970 else:
1975 1971 parents = (self._repo[parents[0]], self._repo[parents[1]])
1976 1972
1977 1973 files = self._cache.keys()
1978 1974 def getfile(repo, memctx, path):
1979 1975 if self._cache[path]['exists']:
1980 1976 return memfilectx(repo, memctx, path,
1981 1977 self._cache[path]['data'],
1982 1978 'l' in self._cache[path]['flags'],
1983 1979 'x' in self._cache[path]['flags'],
1984 1980 self._cache[path]['copied'])
1985 1981 else:
1986 1982 # Returning None, but including the path in `files`, is
1987 1983 # necessary for memctx to register a deletion.
1988 1984 return None
1989 1985 return memctx(self._repo, parents, text, files, getfile, date=date,
1990 1986 extra=extra, user=user, branch=branch, editor=editor)
1991 1987
1992 1988 def isdirty(self, path):
1993 1989 return path in self._cache
1994 1990
1995 1991 def isempty(self):
1996 1992 # We need to discard any keys that are actually clean before the empty
1997 1993 # commit check.
1998 1994 self._compact()
1999 1995 return len(self._cache) == 0
2000 1996
2001 1997 def clean(self):
2002 1998 self._cache = {}
2003 1999
2004 2000 def _compact(self):
2005 2001 """Removes keys from the cache that are actually clean, by comparing
2006 2002 them with the underlying context.
2007 2003
2008 2004 This can occur during the merge process, e.g. by passing --tool :local
2009 2005 to resolve a conflict.
2010 2006 """
2011 2007 keys = []
2012 2008 for path in self._cache.keys():
2013 2009 cache = self._cache[path]
2014 2010 try:
2015 2011 underlying = self._wrappedctx[path]
2016 2012 if (underlying.data() == cache['data'] and
2017 2013 underlying.flags() == cache['flags']):
2018 2014 keys.append(path)
2019 2015 except error.ManifestLookupError:
2020 2016 # Path not in the underlying manifest (created).
2021 2017 continue
2022 2018
2023 2019 for path in keys:
2024 2020 del self._cache[path]
2025 2021 return keys
2026 2022
2027 2023 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2028 2024 self._cache[path] = {
2029 2025 'exists': exists,
2030 2026 'data': data,
2031 2027 'date': date,
2032 2028 'flags': flags,
2033 2029 'copied': None,
2034 2030 }
2035 2031
2036 2032 def filectx(self, path, filelog=None):
2037 2033 return overlayworkingfilectx(self._repo, path, parent=self,
2038 2034 filelog=filelog)
2039 2035
2040 2036 class overlayworkingfilectx(committablefilectx):
2041 2037 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2042 2038 cache, which can be flushed through later by calling ``flush()``."""
2043 2039
2044 2040 def __init__(self, repo, path, filelog=None, parent=None):
2045 2041 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2046 2042 parent)
2047 2043 self._repo = repo
2048 2044 self._parent = parent
2049 2045 self._path = path
2050 2046
2051 2047 def cmp(self, fctx):
2052 2048 return self.data() != fctx.data()
2053 2049
2054 2050 def changectx(self):
2055 2051 return self._parent
2056 2052
2057 2053 def data(self):
2058 2054 return self._parent.data(self._path)
2059 2055
2060 2056 def date(self):
2061 2057 return self._parent.filedate(self._path)
2062 2058
2063 2059 def exists(self):
2064 2060 return self.lexists()
2065 2061
2066 2062 def lexists(self):
2067 2063 return self._parent.exists(self._path)
2068 2064
2069 2065 def renamed(self):
2070 2066 path = self._parent.copydata(self._path)
2071 2067 if not path:
2072 2068 return None
2073 2069 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2074 2070
2075 2071 def size(self):
2076 2072 return self._parent.size(self._path)
2077 2073
2078 2074 def markcopied(self, origin):
2079 2075 self._parent.markcopied(self._path, origin)
2080 2076
2081 2077 def audit(self):
2082 2078 pass
2083 2079
2084 2080 def flags(self):
2085 2081 return self._parent.flags(self._path)
2086 2082
2087 2083 def setflags(self, islink, isexec):
2088 2084 return self._parent.setflags(self._path, islink, isexec)
2089 2085
2090 2086 def write(self, data, flags, backgroundclose=False, **kwargs):
2091 2087 return self._parent.write(self._path, data, flags, **kwargs)
2092 2088
2093 2089 def remove(self, ignoremissing=False):
2094 2090 return self._parent.remove(self._path)
2095 2091
2096 2092 def clearunknown(self):
2097 2093 pass
2098 2094
2099 2095 class workingcommitctx(workingctx):
2100 2096 """A workingcommitctx object makes access to data related to
2101 2097 the revision being committed convenient.
2102 2098
2103 2099 This hides changes in the working directory, if they aren't
2104 2100 committed in this context.
2105 2101 """
2106 2102 def __init__(self, repo, changes,
2107 2103 text="", user=None, date=None, extra=None):
2108 2104 super(workingctx, self).__init__(repo, text, user, date, extra,
2109 2105 changes)
2110 2106
2111 2107 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2112 2108 """Return matched files only in ``self._status``
2113 2109
2114 2110 Uncommitted files appear "clean" via this context, even if
2115 2111 they aren't actually so in the working directory.
2116 2112 """
2117 2113 if clean:
2118 2114 clean = [f for f in self._manifest if f not in self._changedset]
2119 2115 else:
2120 2116 clean = []
2121 2117 return scmutil.status([f for f in self._status.modified if match(f)],
2122 2118 [f for f in self._status.added if match(f)],
2123 2119 [f for f in self._status.removed if match(f)],
2124 2120 [], [], [], clean)
2125 2121
2126 2122 @propertycache
2127 2123 def _changedset(self):
2128 2124 """Return the set of files changed in this context
2129 2125 """
2130 2126 changed = set(self._status.modified)
2131 2127 changed.update(self._status.added)
2132 2128 changed.update(self._status.removed)
2133 2129 return changed
2134 2130
2135 2131 def makecachingfilectxfn(func):
2136 2132 """Create a filectxfn that caches based on the path.
2137 2133
2138 2134 We can't use util.cachefunc because it uses all arguments as the cache
2139 2135 key and this creates a cycle since the arguments include the repo and
2140 2136 memctx.
2141 2137 """
2142 2138 cache = {}
2143 2139
2144 2140 def getfilectx(repo, memctx, path):
2145 2141 if path not in cache:
2146 2142 cache[path] = func(repo, memctx, path)
2147 2143 return cache[path]
2148 2144
2149 2145 return getfilectx
2150 2146
2151 2147 def memfilefromctx(ctx):
2152 2148 """Given a context return a memfilectx for ctx[path]
2153 2149
2154 2150 This is a convenience method for building a memctx based on another
2155 2151 context.
2156 2152 """
2157 2153 def getfilectx(repo, memctx, path):
2158 2154 fctx = ctx[path]
2159 2155 # this is weird but apparently we only keep track of one parent
2160 2156 # (why not only store that instead of a tuple?)
2161 2157 copied = fctx.renamed()
2162 2158 if copied:
2163 2159 copied = copied[0]
2164 2160 return memfilectx(repo, memctx, path, fctx.data(),
2165 2161 islink=fctx.islink(), isexec=fctx.isexec(),
2166 2162 copied=copied)
2167 2163
2168 2164 return getfilectx
2169 2165
2170 2166 def memfilefrompatch(patchstore):
2171 2167 """Given a patch (e.g. patchstore object) return a memfilectx
2172 2168
2173 2169 This is a convenience method for building a memctx based on a patchstore.
2174 2170 """
2175 2171 def getfilectx(repo, memctx, path):
2176 2172 data, mode, copied = patchstore.getfile(path)
2177 2173 if data is None:
2178 2174 return None
2179 2175 islink, isexec = mode
2180 2176 return memfilectx(repo, memctx, path, data, islink=islink,
2181 2177 isexec=isexec, copied=copied)
2182 2178
2183 2179 return getfilectx
2184 2180
2185 2181 class memctx(committablectx):
2186 2182 """Use memctx to perform in-memory commits via localrepo.commitctx().
2187 2183
2188 2184 Revision information is supplied at initialization time while
2189 2185 related files' data is made available through a callback
2190 2186 mechanism. 'repo' is the current localrepo, 'parents' is a
2191 2187 sequence of two parent revision identifiers (pass None for every
2192 2188 missing parent), 'text' is the commit message and 'files' lists
2193 2189 names of files touched by the revision (normalized and relative to
2194 2190 repository root).
2195 2191
2196 2192 filectxfn(repo, memctx, path) is a callable receiving the
2197 2193 repository, the current memctx object and the normalized path of
2198 2194 requested file, relative to repository root. It is fired by the
2199 2195 commit function for every file in 'files', but the call order is
2200 2196 undefined. If the file is available in the revision being
2201 2197 committed (updated or added), filectxfn returns a memfilectx
2202 2198 object. If the file was removed, filectxfn returns None for recent
2203 2199 Mercurial. Moved files are represented by marking the source file
2204 2200 removed and the new file added with copy information (see
2205 2201 memfilectx).
2206 2202
2207 2203 user receives the committer name and defaults to current
2208 2204 repository username, date is the commit date in any format
2209 2205 supported by dateutil.parsedate() and defaults to current date, extra
2210 2206 is a dictionary of metadata or is left empty.
2211 2207 """
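# A hedged usage sketch (not part of this module): an extension might build an
# in-memory commit from a {path: data} mapping ('filedata' is a made-up name):
#
#   def getfilectx(repo, memctx, path):
#       data = filedata.get(path)
#       if data is None:
#           return None  # treated as a removal of 'path'
#       return memfilectx(repo, memctx, path, data)
#
#   ctx = memctx(repo, (repo['.'].node(), None), 'commit message',
#                sorted(filedata), getfilectx)
#   newnode = ctx.commit()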
2212 2208
2213 2209 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2214 2210 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2215 2211 # this field to determine what to do in filectxfn.
2216 2212 _returnnoneformissingfiles = True
2217 2213
2218 2214 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2219 2215 date=None, extra=None, branch=None, editor=False):
2220 2216 super(memctx, self).__init__(repo, text, user, date, extra)
2221 2217 self._rev = None
2222 2218 self._node = None
2223 2219 parents = [(p or nullid) for p in parents]
2224 2220 p1, p2 = parents
2225 2221 self._parents = [self._repo[p] for p in (p1, p2)]
2226 2222 files = sorted(set(files))
2227 2223 self._files = files
2228 2224 if branch is not None:
2229 2225 self._extra['branch'] = encoding.fromlocal(branch)
2230 2226 self.substate = {}
2231 2227
2232 2228 if isinstance(filectxfn, patch.filestore):
2233 2229 filectxfn = memfilefrompatch(filectxfn)
2234 2230 elif not callable(filectxfn):
2235 2231 # if store is not callable, wrap it in a function
2236 2232 filectxfn = memfilefromctx(filectxfn)
2237 2233
2238 2234 # memoizing increases performance for e.g. vcs convert scenarios.
2239 2235 self._filectxfn = makecachingfilectxfn(filectxfn)
2240 2236
2241 2237 if editor:
2242 2238 self._text = editor(self._repo, self, [])
2243 2239 self._repo.savecommitmessage(self._text)
2244 2240
2245 2241 def filectx(self, path, filelog=None):
2246 2242 """get a file context from the working directory
2247 2243
2248 2244 Returns None if file doesn't exist and should be removed."""
2249 2245 return self._filectxfn(self._repo, self, path)
2250 2246
2251 2247 def commit(self):
2252 2248 """commit context to the repo"""
2253 2249 return self._repo.commitctx(self)
2254 2250
2255 2251 @propertycache
2256 2252 def _manifest(self):
2257 2253 """generate a manifest based on the return values of filectxfn"""
2258 2254
2259 2255 # keep this simple for now; just worry about p1
2260 2256 pctx = self._parents[0]
2261 2257 man = pctx.manifest().copy()
2262 2258
2263 2259 for f in self._status.modified:
2264 2260 p1node = nullid
2265 2261 p2node = nullid
2266 2262 p = pctx[f].parents() # if file isn't in pctx, check p2?
2267 2263 if len(p) > 0:
2268 2264 p1node = p[0].filenode()
2269 2265 if len(p) > 1:
2270 2266 p2node = p[1].filenode()
2271 2267 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2272 2268
2273 2269 for f in self._status.added:
2274 2270 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2275 2271
2276 2272 for f in self._status.removed:
2277 2273 if f in man:
2278 2274 del man[f]
2279 2275
2280 2276 return man
2281 2277
2282 2278 @propertycache
2283 2279 def _status(self):
2284 2280 """Calculate exact status from ``files`` specified at construction
2285 2281 """
2286 2282 man1 = self.p1().manifest()
2287 2283 p2 = self._parents[1]
2288 2284 # "1 < len(self._parents)" can't be used for checking
2289 2285 # existence of the 2nd parent, because "memctx._parents" is
2290 2286 # explicitly initialized as a list of length 2.
2291 2287 if p2.node() != nullid:
2292 2288 man2 = p2.manifest()
2293 2289 managing = lambda f: f in man1 or f in man2
2294 2290 else:
2295 2291 managing = lambda f: f in man1
2296 2292
2297 2293 modified, added, removed = [], [], []
2298 2294 for f in self._files:
2299 2295 if not managing(f):
2300 2296 added.append(f)
2301 2297 elif self[f]:
2302 2298 modified.append(f)
2303 2299 else:
2304 2300 removed.append(f)
2305 2301
2306 2302 return scmutil.status(modified, added, removed, [], [], [], [])
2307 2303
2308 2304 class memfilectx(committablefilectx):
2309 2305 """memfilectx represents an in-memory file to commit.
2310 2306
2311 2307 See memctx and committablefilectx for more details.
2312 2308 """
2313 2309 def __init__(self, repo, changectx, path, data, islink=False,
2314 2310 isexec=False, copied=None):
2315 2311 """
2316 2312 path is the normalized file path relative to repository root.
2317 2313 data is the file content as a string.
2318 2314 islink is True if the file is a symbolic link.
2319 2315 isexec is True if the file is executable.
2320 2316 copied is the source file path if current file was copied in the
2321 2317 revision being committed, or None."""
2322 2318 super(memfilectx, self).__init__(repo, path, None, changectx)
2323 2319 self._data = data
2324 2320 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2325 2321 self._copied = None
2326 2322 if copied:
2327 2323 self._copied = (copied, nullid)
2328 2324
2329 2325 def data(self):
2330 2326 return self._data
2331 2327
2332 2328 def remove(self, ignoremissing=False):
2333 2329 """wraps unlink for a repo's working directory"""
2334 2330 # need to figure out what to do here
2335 2331 del self._changectx[self._path]
2336 2332
2337 2333 def write(self, data, flags, **kwargs):
2338 2334 """wraps repo.wwrite"""
2339 2335 self._data = data
2340 2336
2341 2337 class overlayfilectx(committablefilectx):
2342 2338 """Like memfilectx but takes an original filectx and optional parameters to
2343 2339 override parts of it. This is useful when fctx.data() is expensive (i.e.
2344 2340 flag processor is expensive) and raw data, flags, and filenode could be
2345 2341 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2346 2342 """
2347 2343
2348 2344 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2349 2345 copied=None, ctx=None):
2350 2346 """originalfctx: filecontext to duplicate
2351 2347
2352 2348 datafunc: None or a function to override data (file content). It is a
2353 2349 function so the data can be computed lazily. path, flags, copied, ctx: None or an overridden value
2354 2350
2355 2351 copied could be (path, rev), or False. copied could also be just path,
2356 2352 and will be converted to (path, nullid). This simplifies some callers.
2357 2353 """
2358 2354
2359 2355 if path is None:
2360 2356 path = originalfctx.path()
2361 2357 if ctx is None:
2362 2358 ctx = originalfctx.changectx()
2363 2359 ctxmatch = lambda: True
2364 2360 else:
2365 2361 ctxmatch = lambda: ctx == originalfctx.changectx()
2366 2362
2367 2363 repo = originalfctx.repo()
2368 2364 flog = originalfctx.filelog()
2369 2365 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2370 2366
2371 2367 if copied is None:
2372 2368 copied = originalfctx.renamed()
2373 2369 copiedmatch = lambda: True
2374 2370 else:
2375 2371 if copied and not isinstance(copied, tuple):
2376 2372 # repo._filecommit will recalculate copyrev so nullid is okay
2377 2373 copied = (copied, nullid)
2378 2374 copiedmatch = lambda: copied == originalfctx.renamed()
2379 2375
2380 2376 # When data, copied (could affect data), ctx (could affect filelog
2381 2377 # parents) are not overridden, rawdata, rawflags, and filenode may be
2382 2378 # reused (repo._filecommit should double check filelog parents).
2383 2379 #
2384 2380 # path, flags are not hashed in filelog (but in manifestlog) so they do
2385 2381 # not affect reusability here.
2386 2382 #
2387 2383 # If ctx or copied is overridden to the same value as in originalfctx,
2388 2384 # it is still considered reusable. originalfctx.renamed() may be a bit
2389 2385 # expensive so it's not called unless necessary. Assuming datafunc is
2390 2386 # always expensive, do not call it for this "reusable" test.
2391 2387 reusable = datafunc is None and ctxmatch() and copiedmatch()
2392 2388
2393 2389 if datafunc is None:
2394 2390 datafunc = originalfctx.data
2395 2391 if flags is None:
2396 2392 flags = originalfctx.flags()
2397 2393
2398 2394 self._datafunc = datafunc
2399 2395 self._flags = flags
2400 2396 self._copied = copied
2401 2397
2402 2398 if reusable:
2403 2399 # copy extra fields from originalfctx
2404 2400 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2405 2401 for attr_ in attrs:
2406 2402 if util.safehasattr(originalfctx, attr_):
2407 2403 setattr(self, attr_, getattr(originalfctx, attr_))
2408 2404
2409 2405 def data(self):
2410 2406 return self._datafunc()
2411 2407
2412 2408 class metadataonlyctx(committablectx):
2413 2409 """Like memctx but it's reusing the manifest of a different commit.
2414 2410 Intended to be used by lightweight operations that are creating
2415 2411 metadata-only changes.
2416 2412
2417 2413 Revision information is supplied at initialization time. 'repo' is the
2418 2414 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2419 2415 'parents' is a sequence of two parent revision identifiers (pass None for
2420 2416 every missing parent), 'text' is the commit message.
2421 2417
2422 2418 user receives the committer name and defaults to current repository
2423 2419 username, date is the commit date in any format supported by
2424 2420 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2425 2421 metadata or is left empty.
2426 2422 """
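# A hedged usage sketch (not part of this module): rewriting only the metadata
# (e.g. the description) of an existing changeset, roughly ('oldnode' is a
# made-up name):
#
#   newctx = metadataonlyctx(repo, repo[oldnode], text='reworded message')
#   newnode = newctx.commit()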
2427 2423 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2428 2424 date=None, extra=None, editor=False):
2429 2425 if text is None:
2430 2426 text = originalctx.description()
2431 2427 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2432 2428 self._rev = None
2433 2429 self._node = None
2434 2430 self._originalctx = originalctx
2435 2431 self._manifestnode = originalctx.manifestnode()
2436 2432 if parents is None:
2437 2433 parents = originalctx.parents()
2438 2434 else:
2439 2435 parents = [repo[p] for p in parents if p is not None]
2440 2436 parents = parents[:]
2441 2437 while len(parents) < 2:
2442 2438 parents.append(repo[nullid])
2443 2439 p1, p2 = self._parents = parents
2444 2440
2445 2441 # sanity check to ensure that the reused manifest parents are
2446 2442 # manifests of our commit parents
2447 2443 mp1, mp2 = self.manifestctx().parents
2448 2444 if p1 != nullid and p1.manifestnode() != mp1:
2449 2445 raise RuntimeError('can\'t reuse the manifest: '
2450 2446 'its p1 doesn\'t match the new ctx p1')
2451 2447 if p2 != nullid and p2.manifestnode() != mp2:
2452 2448 raise RuntimeError('can\'t reuse the manifest: '
2453 2449 'its p2 doesn\'t match the new ctx p2')
2454 2450
2455 2451 self._files = originalctx.files()
2456 2452 self.substate = {}
2457 2453
2458 2454 if editor:
2459 2455 self._text = editor(self._repo, self, [])
2460 2456 self._repo.savecommitmessage(self._text)
2461 2457
2462 2458 def manifestnode(self):
2463 2459 return self._manifestnode
2464 2460
2465 2461 @property
2466 2462 def _manifestctx(self):
2467 2463 return self._repo.manifestlog[self._manifestnode]
2468 2464
2469 2465 def filectx(self, path, filelog=None):
2470 2466 return self._originalctx.filectx(path, filelog=filelog)
2471 2467
2472 2468 def commit(self):
2473 2469 """commit context to the repo"""
2474 2470 return self._repo.commitctx(self)
2475 2471
2476 2472 @property
2477 2473 def _manifest(self):
2478 2474 return self._originalctx.manifest()
2479 2475
2480 2476 @propertycache
2481 2477 def _status(self):
2482 2478 """Calculate exact status from ``files`` specified in the ``origctx``
2483 2479 and the parents' manifests.
2484 2480 """
2485 2481 man1 = self.p1().manifest()
2486 2482 p2 = self._parents[1]
2487 2483 # "1 < len(self._parents)" can't be used for checking
2488 2484 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2489 2485 # explicitly initialized as a list of length 2.
2490 2486 if p2.node() != nullid:
2491 2487 man2 = p2.manifest()
2492 2488 managing = lambda f: f in man1 or f in man2
2493 2489 else:
2494 2490 managing = lambda f: f in man1
2495 2491
2496 2492 modified, added, removed = [], [], []
2497 2493 for f in self._files:
2498 2494 if not managing(f):
2499 2495 added.append(f)
2500 2496 elif f in self:
2501 2497 modified.append(f)
2502 2498 else:
2503 2499 removed.append(f)
2504 2500
2505 2501 return scmutil.status(modified, added, removed, [], [], [], [])
2506 2502
2507 2503 class arbitraryfilectx(object):
2508 2504 """Allows you to use filectx-like functions on a file in an arbitrary
2509 2505 location on disk, possibly not in the working directory.
2510 2506 """
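# A hedged usage sketch (not part of this module): merge helpers such as
# contrib/simplemerge wrap plain on-disk paths so later code only needs the
# filectx-like API ('localpath', 'otherfctx' and 'mergeddata' are made-up
# names):
#
#   local = arbitraryfilectx(localpath, repo=repo)
#   if local.cmp(otherfctx):
#       local.write(mergeddata, local.flags())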
2511 2507 def __init__(self, path, repo=None):
2512 2508 # Repo is optional because contrib/simplemerge uses this class.
2513 2509 self._repo = repo
2514 2510 self._path = path
2515 2511
2516 2512 def cmp(self, fctx):
2517 2513 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2518 2514 # path if either side is a symlink.
2519 2515 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2520 2516 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2521 2517 # Add a fast-path for merge if both sides are disk-backed.
2522 2518 # Note that filecmp uses the opposite return values (True if same)
2523 2519 # from our cmp functions (True if different).
2524 2520 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2525 2521 return self.data() != fctx.data()
2526 2522
2527 2523 def path(self):
2528 2524 return self._path
2529 2525
2530 2526 def flags(self):
2531 2527 return ''
2532 2528
2533 2529 def data(self):
2534 2530 return util.readfile(self._path)
2535 2531
2536 2532 def decodeddata(self):
2537 2533 with open(self._path, "rb") as f:
2538 2534 return f.read()
2539 2535
2540 2536 def remove(self):
2541 2537 util.unlink(self._path)
2542 2538
2543 2539 def write(self, data, flags, **kwargs):
2544 2540 assert not flags
2545 2541 with open(self._path, "w") as f:
2546 2542 f.write(data)