##// END OF EJS Templates
arbitraryfilecontext: skip the cmp fast path if any side is a symlink...
Phil Cohen -
r34836:14c87708 default
parent child Browse files
Show More
@@ -0,0 +1,57 b''
1 Setup:
2 $ cat > eval.py <<EOF
3 > from __future__ import absolute_import
4 > import filecmp
5 > from mercurial import commands, context, registrar
6 > cmdtable = {}
7 > command = registrar.command(cmdtable)
8 > @command(b'eval', [], 'hg eval CMD')
9 > def eval_(ui, repo, *cmds, **opts):
10 > cmd = " ".join(cmds)
11 > res = str(eval(cmd, globals(), locals()))
12 > ui.warn("%s" % res)
13 > EOF
14
15 $ echo "[extensions]" >> $HGRCPATH
16 $ echo "eval=`pwd`/eval.py" >> $HGRCPATH
17
18 Arbitraryfilectx.cmp does not follow symlinks:
19 $ mkdir case1
20 $ cd case1
21 $ hg init
22 $ printf "A" > real_A
23 $ printf "foo" > A
24 $ printf "foo" > B
25 $ ln -s A sym_A
26 $ hg add .
27 adding A
28 adding B
29 adding real_A
30 adding sym_A
31 $ hg commit -m "base"
32
33 These files are different and should return True (different):
34 (Note that filecmp.cmp's return semantics are inverted from ours, so we invert
35 for simplicity):
36 $ hg eval "context.arbitraryfilectx('A', repo).cmp(repo[None]['real_A'])"
37 True (no-eol)
38 $ hg eval "not filecmp.cmp('A', 'real_A')"
39 True (no-eol)
40
41 These files are identical and should return False (same):
42 $ hg eval "context.arbitraryfilectx('A', repo).cmp(repo[None]['A'])"
43 False (no-eol)
44 $ hg eval "context.arbitraryfilectx('A', repo).cmp(repo[None]['B'])"
45 False (no-eol)
46 $ hg eval "not filecmp.cmp('A', 'B')"
47 False (no-eol)
48
49 This comparison should also return False, since A and sym_A are substantially
50 the same in the eyes of ``filectx.cmp``, which looks at data only.
51 $ hg eval "context.arbitraryfilectx('real_A', repo).cmp(repo[None]['sym_A'])"
52 False (no-eol)
53
54 A naive use of filecmp on those two would wrongly return True, since it follows
55 the symlink to "A", which has different contents.
56 $ hg eval "not filecmp.cmp('real_A', 'sym_A')"
57 True (no-eol)
@@ -1,2598 +1,2602 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 revlog,
45 45 scmutil,
46 46 sparse,
47 47 subrepo,
48 48 util,
49 49 )
50 50
51 51 propertycache = util.propertycache
52 52
53 53 nonascii = re.compile(r'[^\x21-\x7f]').search
54 54
55 55 class basectx(object):
56 56 """A basectx object represents the common logic for its children:
57 57 changectx: read-only context that is already present in the repo,
58 58 workingctx: a context that represents the working directory and can
59 59 be committed,
60 60 memctx: a context that represents changes in-memory and can also
61 61 be committed."""
62 62 def __new__(cls, repo, changeid='', *args, **kwargs):
63 63 if isinstance(changeid, basectx):
64 64 return changeid
65 65
66 66 o = super(basectx, cls).__new__(cls)
67 67
68 68 o._repo = repo
69 69 o._rev = nullrev
70 70 o._node = nullid
71 71
72 72 return o
73 73
74 74 def __bytes__(self):
75 75 return short(self.node())
76 76
77 77 __str__ = encoding.strmethod(__bytes__)
78 78
79 79 def __int__(self):
80 80 return self.rev()
81 81
82 82 def __repr__(self):
83 83 return r"<%s %s>" % (type(self).__name__, str(self))
84 84
85 85 def __eq__(self, other):
86 86 try:
87 87 return type(self) == type(other) and self._rev == other._rev
88 88 except AttributeError:
89 89 return False
90 90
91 91 def __ne__(self, other):
92 92 return not (self == other)
93 93
94 94 def __contains__(self, key):
95 95 return key in self._manifest
96 96
97 97 def __getitem__(self, key):
98 98 return self.filectx(key)
99 99
100 100 def __iter__(self):
101 101 return iter(self._manifest)
102 102
103 103 def _buildstatusmanifest(self, status):
104 104 """Builds a manifest that includes the given status results, if this is
105 105 a working copy context. For non-working copy contexts, it just returns
106 106 the normal manifest."""
107 107 return self.manifest()
108 108
109 109 def _matchstatus(self, other, match):
110 110 """This internal method provides a way for child objects to override the
111 111 match operator.
112 112 """
113 113 return match
114 114
115 115 def _buildstatus(self, other, s, match, listignored, listclean,
116 116 listunknown):
117 117 """build a status with respect to another context"""
118 118 # Load earliest manifest first for caching reasons. More specifically,
119 119 # if you have revisions 1000 and 1001, 1001 is probably stored as a
120 120 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
121 121 # 1000 and cache it so that when you read 1001, we just need to apply a
122 122 # delta to what's in the cache. So that's one full reconstruction + one
123 123 # delta application.
124 124 mf2 = None
125 125 if self.rev() is not None and self.rev() < other.rev():
126 126 mf2 = self._buildstatusmanifest(s)
127 127 mf1 = other._buildstatusmanifest(s)
128 128 if mf2 is None:
129 129 mf2 = self._buildstatusmanifest(s)
130 130
131 131 modified, added = [], []
132 132 removed = []
133 133 clean = []
134 134 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
135 135 deletedset = set(deleted)
136 136 d = mf1.diff(mf2, match=match, clean=listclean)
137 137 for fn, value in d.iteritems():
138 138 if fn in deletedset:
139 139 continue
140 140 if value is None:
141 141 clean.append(fn)
142 142 continue
143 143 (node1, flag1), (node2, flag2) = value
144 144 if node1 is None:
145 145 added.append(fn)
146 146 elif node2 is None:
147 147 removed.append(fn)
148 148 elif flag1 != flag2:
149 149 modified.append(fn)
150 150 elif node2 not in wdirnodes:
151 151 # When comparing files between two commits, we save time by
152 152 # not comparing the file contents when the nodeids differ.
153 153 # Note that this means we incorrectly report a reverted change
154 154 # to a file as a modification.
155 155 modified.append(fn)
156 156 elif self[fn].cmp(other[fn]):
157 157 modified.append(fn)
158 158 else:
159 159 clean.append(fn)
160 160
161 161 if removed:
162 162 # need to filter files if they are already reported as removed
163 163 unknown = [fn for fn in unknown if fn not in mf1 and
164 164 (not match or match(fn))]
165 165 ignored = [fn for fn in ignored if fn not in mf1 and
166 166 (not match or match(fn))]
167 167 # if they're deleted, don't report them as removed
168 168 removed = [fn for fn in removed if fn not in deletedset]
169 169
170 170 return scmutil.status(modified, added, removed, deleted, unknown,
171 171 ignored, clean)
172 172
173 173 @propertycache
174 174 def substate(self):
175 175 return subrepo.state(self, self._repo.ui)
176 176
177 177 def subrev(self, subpath):
178 178 return self.substate[subpath][1]
179 179
180 180 def rev(self):
181 181 return self._rev
182 182 def node(self):
183 183 return self._node
184 184 def hex(self):
185 185 return hex(self.node())
186 186 def manifest(self):
187 187 return self._manifest
188 188 def manifestctx(self):
189 189 return self._manifestctx
190 190 def repo(self):
191 191 return self._repo
192 192 def phasestr(self):
193 193 return phases.phasenames[self.phase()]
194 194 def mutable(self):
195 195 return self.phase() > phases.public
196 196
197 197 def getfileset(self, expr):
198 198 return fileset.getfileset(self, expr)
199 199
200 200 def obsolete(self):
201 201 """True if the changeset is obsolete"""
202 202 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
203 203
204 204 def extinct(self):
205 205 """True if the changeset is extinct"""
206 206 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
207 207
208 208 def unstable(self):
209 209 msg = ("'context.unstable' is deprecated, "
210 210 "use 'context.orphan'")
211 211 self._repo.ui.deprecwarn(msg, '4.4')
212 212 return self.orphan()
213 213
214 214 def orphan(self):
215 215 """True if the changeset is not obsolete but it's ancestor are"""
216 216 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
217 217
218 218 def bumped(self):
219 219 msg = ("'context.bumped' is deprecated, "
220 220 "use 'context.phasedivergent'")
221 221 self._repo.ui.deprecwarn(msg, '4.4')
222 222 return self.phasedivergent()
223 223
224 224 def phasedivergent(self):
225 225 """True if the changeset try to be a successor of a public changeset
226 226
227 227 Only non-public and non-obsolete changesets may be bumped.
228 228 """
229 229 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
230 230
231 231 def divergent(self):
232 232 msg = ("'context.divergent' is deprecated, "
233 233 "use 'context.contentdivergent'")
234 234 self._repo.ui.deprecwarn(msg, '4.4')
235 235 return self.contentdivergent()
236 236
237 237 def contentdivergent(self):
238 238 """Is a successors of a changeset with multiple possible successors set
239 239
240 240 Only non-public and non-obsolete changesets may be divergent.
241 241 """
242 242 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
243 243
244 244 def troubled(self):
245 245 msg = ("'context.troubled' is deprecated, "
246 246 "use 'context.isunstable'")
247 247 self._repo.ui.deprecwarn(msg, '4.4')
248 248 return self.isunstable()
249 249
250 250 def isunstable(self):
251 251 """True if the changeset is either unstable, bumped or divergent"""
252 252 return self.orphan() or self.phasedivergent() or self.contentdivergent()
253 253
254 254 def troubles(self):
255 255 """Keep the old version around in order to avoid breaking extensions
256 256 about different return values.
257 257 """
258 258 msg = ("'context.troubles' is deprecated, "
259 259 "use 'context.instabilities'")
260 260 self._repo.ui.deprecwarn(msg, '4.4')
261 261
262 262 troubles = []
263 263 if self.orphan():
264 264 troubles.append('orphan')
265 265 if self.phasedivergent():
266 266 troubles.append('bumped')
267 267 if self.contentdivergent():
268 268 troubles.append('divergent')
269 269 return troubles
270 270
271 271 def instabilities(self):
272 272 """return the list of instabilities affecting this changeset.
273 273
274 274 Instabilities are returned as strings. possible values are:
275 275 - orphan,
276 276 - phase-divergent,
277 277 - content-divergent.
278 278 """
279 279 instabilities = []
280 280 if self.orphan():
281 281 instabilities.append('orphan')
282 282 if self.phasedivergent():
283 283 instabilities.append('phase-divergent')
284 284 if self.contentdivergent():
285 285 instabilities.append('content-divergent')
286 286 return instabilities
287 287
288 288 def parents(self):
289 289 """return contexts for each parent changeset"""
290 290 return self._parents
291 291
292 292 def p1(self):
293 293 return self._parents[0]
294 294
295 295 def p2(self):
296 296 parents = self._parents
297 297 if len(parents) == 2:
298 298 return parents[1]
299 299 return changectx(self._repo, nullrev)
300 300
301 301 def _fileinfo(self, path):
302 302 if r'_manifest' in self.__dict__:
303 303 try:
304 304 return self._manifest[path], self._manifest.flags(path)
305 305 except KeyError:
306 306 raise error.ManifestLookupError(self._node, path,
307 307 _('not found in manifest'))
308 308 if r'_manifestdelta' in self.__dict__ or path in self.files():
309 309 if path in self._manifestdelta:
310 310 return (self._manifestdelta[path],
311 311 self._manifestdelta.flags(path))
312 312 mfl = self._repo.manifestlog
313 313 try:
314 314 node, flag = mfl[self._changeset.manifest].find(path)
315 315 except KeyError:
316 316 raise error.ManifestLookupError(self._node, path,
317 317 _('not found in manifest'))
318 318
319 319 return node, flag
320 320
321 321 def filenode(self, path):
322 322 return self._fileinfo(path)[0]
323 323
324 324 def flags(self, path):
325 325 try:
326 326 return self._fileinfo(path)[1]
327 327 except error.LookupError:
328 328 return ''
329 329
330 330 def sub(self, path, allowcreate=True):
331 331 '''return a subrepo for the stored revision of path, never wdir()'''
332 332 return subrepo.subrepo(self, path, allowcreate=allowcreate)
333 333
334 334 def nullsub(self, path, pctx):
335 335 return subrepo.nullsubrepo(self, path, pctx)
336 336
337 337 def workingsub(self, path):
338 338 '''return a subrepo for the stored revision, or wdir if this is a wdir
339 339 context.
340 340 '''
341 341 return subrepo.subrepo(self, path, allowwdir=True)
342 342
343 343 def match(self, pats=None, include=None, exclude=None, default='glob',
344 344 listsubrepos=False, badfn=None):
345 345 r = self._repo
346 346 return matchmod.match(r.root, r.getcwd(), pats,
347 347 include, exclude, default,
348 348 auditor=r.nofsauditor, ctx=self,
349 349 listsubrepos=listsubrepos, badfn=badfn)
350 350
351 351 def diff(self, ctx2=None, match=None, **opts):
352 352 """Returns a diff generator for the given contexts and matcher"""
353 353 if ctx2 is None:
354 354 ctx2 = self.p1()
355 355 if ctx2 is not None:
356 356 ctx2 = self._repo[ctx2]
357 357 diffopts = patch.diffopts(self._repo.ui, opts)
358 358 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
359 359
360 360 def dirs(self):
361 361 return self._manifest.dirs()
362 362
363 363 def hasdir(self, dir):
364 364 return self._manifest.hasdir(dir)
365 365
366 366 def status(self, other=None, match=None, listignored=False,
367 367 listclean=False, listunknown=False, listsubrepos=False):
368 368 """return status of files between two nodes or node and working
369 369 directory.
370 370
371 371 If other is None, compare this node with working directory.
372 372
373 373 returns (modified, added, removed, deleted, unknown, ignored, clean)
374 374 """
375 375
376 376 ctx1 = self
377 377 ctx2 = self._repo[other]
378 378
379 379 # This next code block is, admittedly, fragile logic that tests for
380 380 # reversing the contexts and wouldn't need to exist if it weren't for
381 381 # the fast (and common) code path of comparing the working directory
382 382 # with its first parent.
383 383 #
384 384 # What we're aiming for here is the ability to call:
385 385 #
386 386 # workingctx.status(parentctx)
387 387 #
388 388 # If we always built the manifest for each context and compared those,
389 389 # then we'd be done. But the special case of the above call means we
390 390 # just copy the manifest of the parent.
391 391 reversed = False
392 392 if (not isinstance(ctx1, changectx)
393 393 and isinstance(ctx2, changectx)):
394 394 reversed = True
395 395 ctx1, ctx2 = ctx2, ctx1
396 396
397 397 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
398 398 match = ctx2._matchstatus(ctx1, match)
399 399 r = scmutil.status([], [], [], [], [], [], [])
400 400 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
401 401 listunknown)
402 402
403 403 if reversed:
404 404 # Reverse added and removed. Clear deleted, unknown and ignored as
405 405 # these make no sense to reverse.
406 406 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
407 407 r.clean)
408 408
409 409 if listsubrepos:
410 410 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
411 411 try:
412 412 rev2 = ctx2.subrev(subpath)
413 413 except KeyError:
414 414 # A subrepo that existed in node1 was deleted between
415 415 # node1 and node2 (inclusive). Thus, ctx2's substate
416 416 # won't contain that subpath. The best we can do ignore it.
417 417 rev2 = None
418 418 submatch = matchmod.subdirmatcher(subpath, match)
419 419 s = sub.status(rev2, match=submatch, ignored=listignored,
420 420 clean=listclean, unknown=listunknown,
421 421 listsubrepos=True)
422 422 for rfiles, sfiles in zip(r, s):
423 423 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
424 424
425 425 for l in r:
426 426 l.sort()
427 427
428 428 return r
429 429
430 430 def _filterederror(repo, changeid):
431 431 """build an exception to be raised about a filtered changeid
432 432
433 433 This is extracted in a function to help extensions (eg: evolve) to
434 434 experiment with various message variants."""
435 435 if repo.filtername.startswith('visible'):
436 436 msg = _("hidden revision '%s'") % changeid
437 437 hint = _('use --hidden to access hidden revisions')
438 438 return error.FilteredRepoLookupError(msg, hint=hint)
439 439 msg = _("filtered revision '%s' (not in '%s' subset)")
440 440 msg %= (changeid, repo.filtername)
441 441 return error.FilteredRepoLookupError(msg)
442 442
443 443 class changectx(basectx):
444 444 """A changecontext object makes access to data related to a particular
445 445 changeset convenient. It represents a read-only context already present in
446 446 the repo."""
447 447 def __init__(self, repo, changeid=''):
448 448 """changeid is a revision number, node, or tag"""
449 449
450 450 # since basectx.__new__ already took care of copying the object, we
451 451 # don't need to do anything in __init__, so we just exit here
452 452 if isinstance(changeid, basectx):
453 453 return
454 454
455 455 if changeid == '':
456 456 changeid = '.'
457 457 self._repo = repo
458 458
459 459 try:
460 460 if isinstance(changeid, int):
461 461 self._node = repo.changelog.node(changeid)
462 462 self._rev = changeid
463 463 return
464 464 if not pycompat.ispy3 and isinstance(changeid, long):
465 465 changeid = str(changeid)
466 466 if changeid == 'null':
467 467 self._node = nullid
468 468 self._rev = nullrev
469 469 return
470 470 if changeid == 'tip':
471 471 self._node = repo.changelog.tip()
472 472 self._rev = repo.changelog.rev(self._node)
473 473 return
474 474 if changeid == '.' or changeid == repo.dirstate.p1():
475 475 # this is a hack to delay/avoid loading obsmarkers
476 476 # when we know that '.' won't be hidden
477 477 self._node = repo.dirstate.p1()
478 478 self._rev = repo.unfiltered().changelog.rev(self._node)
479 479 return
480 480 if len(changeid) == 20:
481 481 try:
482 482 self._node = changeid
483 483 self._rev = repo.changelog.rev(changeid)
484 484 return
485 485 except error.FilteredRepoLookupError:
486 486 raise
487 487 except LookupError:
488 488 pass
489 489
490 490 try:
491 491 r = int(changeid)
492 492 if '%d' % r != changeid:
493 493 raise ValueError
494 494 l = len(repo.changelog)
495 495 if r < 0:
496 496 r += l
497 497 if r < 0 or r >= l and r != wdirrev:
498 498 raise ValueError
499 499 self._rev = r
500 500 self._node = repo.changelog.node(r)
501 501 return
502 502 except error.FilteredIndexError:
503 503 raise
504 504 except (ValueError, OverflowError, IndexError):
505 505 pass
506 506
507 507 if len(changeid) == 40:
508 508 try:
509 509 self._node = bin(changeid)
510 510 self._rev = repo.changelog.rev(self._node)
511 511 return
512 512 except error.FilteredLookupError:
513 513 raise
514 514 except (TypeError, LookupError):
515 515 pass
516 516
517 517 # lookup bookmarks through the name interface
518 518 try:
519 519 self._node = repo.names.singlenode(repo, changeid)
520 520 self._rev = repo.changelog.rev(self._node)
521 521 return
522 522 except KeyError:
523 523 pass
524 524 except error.FilteredRepoLookupError:
525 525 raise
526 526 except error.RepoLookupError:
527 527 pass
528 528
529 529 self._node = repo.unfiltered().changelog._partialmatch(changeid)
530 530 if self._node is not None:
531 531 self._rev = repo.changelog.rev(self._node)
532 532 return
533 533
534 534 # lookup failed
535 535 # check if it might have come from damaged dirstate
536 536 #
537 537 # XXX we could avoid the unfiltered if we had a recognizable
538 538 # exception for filtered changeset access
539 539 if changeid in repo.unfiltered().dirstate.parents():
540 540 msg = _("working directory has unknown parent '%s'!")
541 541 raise error.Abort(msg % short(changeid))
542 542 try:
543 543 if len(changeid) == 20 and nonascii(changeid):
544 544 changeid = hex(changeid)
545 545 except TypeError:
546 546 pass
547 547 except (error.FilteredIndexError, error.FilteredLookupError,
548 548 error.FilteredRepoLookupError):
549 549 raise _filterederror(repo, changeid)
550 550 except IndexError:
551 551 pass
552 552 raise error.RepoLookupError(
553 553 _("unknown revision '%s'") % changeid)
554 554
555 555 def __hash__(self):
556 556 try:
557 557 return hash(self._rev)
558 558 except AttributeError:
559 559 return id(self)
560 560
561 561 def __nonzero__(self):
562 562 return self._rev != nullrev
563 563
564 564 __bool__ = __nonzero__
565 565
566 566 @propertycache
567 567 def _changeset(self):
568 568 return self._repo.changelog.changelogrevision(self.rev())
569 569
570 570 @propertycache
571 571 def _manifest(self):
572 572 return self._manifestctx.read()
573 573
574 574 @property
575 575 def _manifestctx(self):
576 576 return self._repo.manifestlog[self._changeset.manifest]
577 577
578 578 @propertycache
579 579 def _manifestdelta(self):
580 580 return self._manifestctx.readdelta()
581 581
582 582 @propertycache
583 583 def _parents(self):
584 584 repo = self._repo
585 585 p1, p2 = repo.changelog.parentrevs(self._rev)
586 586 if p2 == nullrev:
587 587 return [changectx(repo, p1)]
588 588 return [changectx(repo, p1), changectx(repo, p2)]
589 589
590 590 def changeset(self):
591 591 c = self._changeset
592 592 return (
593 593 c.manifest,
594 594 c.user,
595 595 c.date,
596 596 c.files,
597 597 c.description,
598 598 c.extra,
599 599 )
600 600 def manifestnode(self):
601 601 return self._changeset.manifest
602 602
603 603 def user(self):
604 604 return self._changeset.user
605 605 def date(self):
606 606 return self._changeset.date
607 607 def files(self):
608 608 return self._changeset.files
609 609 def description(self):
610 610 return self._changeset.description
611 611 def branch(self):
612 612 return encoding.tolocal(self._changeset.extra.get("branch"))
613 613 def closesbranch(self):
614 614 return 'close' in self._changeset.extra
615 615 def extra(self):
616 616 return self._changeset.extra
617 617 def tags(self):
618 618 return self._repo.nodetags(self._node)
619 619 def bookmarks(self):
620 620 return self._repo.nodebookmarks(self._node)
621 621 def phase(self):
622 622 return self._repo._phasecache.phase(self._repo, self._rev)
623 623 def hidden(self):
624 624 return self._rev in repoview.filterrevs(self._repo, 'visible')
625 625
626 626 def isinmemory(self):
627 627 return False
628 628
629 629 def children(self):
630 630 """return contexts for each child changeset"""
631 631 c = self._repo.changelog.children(self._node)
632 632 return [changectx(self._repo, x) for x in c]
633 633
634 634 def ancestors(self):
635 635 for a in self._repo.changelog.ancestors([self._rev]):
636 636 yield changectx(self._repo, a)
637 637
638 638 def descendants(self):
639 639 for d in self._repo.changelog.descendants([self._rev]):
640 640 yield changectx(self._repo, d)
641 641
642 642 def filectx(self, path, fileid=None, filelog=None):
643 643 """get a file context from this changeset"""
644 644 if fileid is None:
645 645 fileid = self.filenode(path)
646 646 return filectx(self._repo, path, fileid=fileid,
647 647 changectx=self, filelog=filelog)
648 648
649 649 def ancestor(self, c2, warn=False):
650 650 """return the "best" ancestor context of self and c2
651 651
652 652 If there are multiple candidates, it will show a message and check
653 653 merge.preferancestor configuration before falling back to the
654 654 revlog ancestor."""
655 655 # deal with workingctxs
656 656 n2 = c2._node
657 657 if n2 is None:
658 658 n2 = c2._parents[0]._node
659 659 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
660 660 if not cahs:
661 661 anc = nullid
662 662 elif len(cahs) == 1:
663 663 anc = cahs[0]
664 664 else:
665 665 # experimental config: merge.preferancestor
666 666 for r in self._repo.ui.configlist('merge', 'preferancestor'):
667 667 try:
668 668 ctx = changectx(self._repo, r)
669 669 except error.RepoLookupError:
670 670 continue
671 671 anc = ctx.node()
672 672 if anc in cahs:
673 673 break
674 674 else:
675 675 anc = self._repo.changelog.ancestor(self._node, n2)
676 676 if warn:
677 677 self._repo.ui.status(
678 678 (_("note: using %s as ancestor of %s and %s\n") %
679 679 (short(anc), short(self._node), short(n2))) +
680 680 ''.join(_(" alternatively, use --config "
681 681 "merge.preferancestor=%s\n") %
682 682 short(n) for n in sorted(cahs) if n != anc))
683 683 return changectx(self._repo, anc)
684 684
685 685 def descendant(self, other):
686 686 """True if other is descendant of this changeset"""
687 687 return self._repo.changelog.descendant(self._rev, other._rev)
688 688
689 689 def walk(self, match):
690 690 '''Generates matching file names.'''
691 691
692 692 # Wrap match.bad method to have message with nodeid
693 693 def bad(fn, msg):
694 694 # The manifest doesn't know about subrepos, so don't complain about
695 695 # paths into valid subrepos.
696 696 if any(fn == s or fn.startswith(s + '/')
697 697 for s in self.substate):
698 698 return
699 699 match.bad(fn, _('no such file in rev %s') % self)
700 700
701 701 m = matchmod.badmatch(match, bad)
702 702 return self._manifest.walk(m)
703 703
704 704 def matches(self, match):
705 705 return self.walk(match)
706 706
707 707 class basefilectx(object):
708 708 """A filecontext object represents the common logic for its children:
709 709 filectx: read-only access to a filerevision that is already present
710 710 in the repo,
711 711 workingfilectx: a filecontext that represents files from the working
712 712 directory,
713 713 memfilectx: a filecontext that represents files in-memory,
714 714 overlayfilectx: duplicate another filecontext with some fields overridden.
715 715 """
716 716 @propertycache
717 717 def _filelog(self):
718 718 return self._repo.file(self._path)
719 719
720 720 @propertycache
721 721 def _changeid(self):
722 722 if r'_changeid' in self.__dict__:
723 723 return self._changeid
724 724 elif r'_changectx' in self.__dict__:
725 725 return self._changectx.rev()
726 726 elif r'_descendantrev' in self.__dict__:
727 727 # this file context was created from a revision with a known
728 728 # descendant, we can (lazily) correct for linkrev aliases
729 729 return self._adjustlinkrev(self._descendantrev)
730 730 else:
731 731 return self._filelog.linkrev(self._filerev)
732 732
733 733 @propertycache
734 734 def _filenode(self):
735 735 if r'_fileid' in self.__dict__:
736 736 return self._filelog.lookup(self._fileid)
737 737 else:
738 738 return self._changectx.filenode(self._path)
739 739
740 740 @propertycache
741 741 def _filerev(self):
742 742 return self._filelog.rev(self._filenode)
743 743
744 744 @propertycache
745 745 def _repopath(self):
746 746 return self._path
747 747
748 748 def __nonzero__(self):
749 749 try:
750 750 self._filenode
751 751 return True
752 752 except error.LookupError:
753 753 # file is missing
754 754 return False
755 755
756 756 __bool__ = __nonzero__
757 757
758 758 def __bytes__(self):
759 759 try:
760 760 return "%s@%s" % (self.path(), self._changectx)
761 761 except error.LookupError:
762 762 return "%s@???" % self.path()
763 763
764 764 __str__ = encoding.strmethod(__bytes__)
765 765
766 766 def __repr__(self):
767 767 return "<%s %s>" % (type(self).__name__, str(self))
768 768
769 769 def __hash__(self):
770 770 try:
771 771 return hash((self._path, self._filenode))
772 772 except AttributeError:
773 773 return id(self)
774 774
775 775 def __eq__(self, other):
776 776 try:
777 777 return (type(self) == type(other) and self._path == other._path
778 778 and self._filenode == other._filenode)
779 779 except AttributeError:
780 780 return False
781 781
782 782 def __ne__(self, other):
783 783 return not (self == other)
784 784
785 785 def filerev(self):
786 786 return self._filerev
787 787 def filenode(self):
788 788 return self._filenode
789 789 @propertycache
790 790 def _flags(self):
791 791 return self._changectx.flags(self._path)
792 792 def flags(self):
793 793 return self._flags
794 794 def filelog(self):
795 795 return self._filelog
796 796 def rev(self):
797 797 return self._changeid
798 798 def linkrev(self):
799 799 return self._filelog.linkrev(self._filerev)
800 800 def node(self):
801 801 return self._changectx.node()
802 802 def hex(self):
803 803 return self._changectx.hex()
804 804 def user(self):
805 805 return self._changectx.user()
806 806 def date(self):
807 807 return self._changectx.date()
808 808 def files(self):
809 809 return self._changectx.files()
810 810 def description(self):
811 811 return self._changectx.description()
812 812 def branch(self):
813 813 return self._changectx.branch()
814 814 def extra(self):
815 815 return self._changectx.extra()
816 816 def phase(self):
817 817 return self._changectx.phase()
818 818 def phasestr(self):
819 819 return self._changectx.phasestr()
820 820 def manifest(self):
821 821 return self._changectx.manifest()
822 822 def changectx(self):
823 823 return self._changectx
824 824 def renamed(self):
825 825 return self._copied
826 826 def repo(self):
827 827 return self._repo
828 828 def size(self):
829 829 return len(self.data())
830 830
831 831 def path(self):
832 832 return self._path
833 833
834 834 def isbinary(self):
835 835 try:
836 836 return util.binary(self.data())
837 837 except IOError:
838 838 return False
839 839 def isexec(self):
840 840 return 'x' in self.flags()
841 841 def islink(self):
842 842 return 'l' in self.flags()
843 843
844 844 def isabsent(self):
845 845 """whether this filectx represents a file not in self._changectx
846 846
847 847 This is mainly for merge code to detect change/delete conflicts. This is
848 848 expected to be True for all subclasses of basectx."""
849 849 return False
850 850
851 851 _customcmp = False
852 852 def cmp(self, fctx):
853 853 """compare with other file context
854 854
855 855 returns True if different than fctx.
856 856 """
857 857 if fctx._customcmp:
858 858 return fctx.cmp(self)
859 859
860 860 if (fctx._filenode is None
861 861 and (self._repo._encodefilterpats
862 862 # if file data starts with '\1\n', empty metadata block is
863 863 # prepended, which adds 4 bytes to filelog.size().
864 864 or self.size() - 4 == fctx.size())
865 865 or self.size() == fctx.size()):
866 866 return self._filelog.cmp(self._filenode, fctx.data())
867 867
868 868 return True
869 869
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog: linkrevs may point at hidden revisions
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not a child of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
915 915
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # Without an associated changeset (no '_changeid'/'_changectx' set),
        # or when we sit exactly on the linkrev, the raw linkrev is already
        # the introducing revision and no adjustment walk is needed.
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
931 931
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()

        The returned filectx shares our cached ancestry context (when any)
        so that subsequent linkrev adjustments reuse the same computation.
        """
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
947 947
    def parents(self):
        """return parent filectxs, substituting rename source when renamed

        Null parents are filtered out; a recorded rename (copy) replaces the
        first (possibly absent) parent with the rename source.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
967 967
968 968 def p1(self):
969 969 return self.parents()[0]
970 970
971 971 def p2(self):
972 972 p = self.parents()
973 973 if len(p) == 2:
974 974 return p[1]
975 975 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
976 976
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.

        :follow: follow renames across copy sources
        :skiprevs: revisions whose changes are attributed to their parents
        :diffopts: diff options forwarded to the block-matching code
        '''

        def lines(text):
            # number of lines in text, counting a trailing partial line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        # decorate(text, rev) pairs each line of text with an annotateline
        # blaming rev; kept as two variants to avoid a per-line branch.
        if linenumber:
            def decorate(text, rev):
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache (parents per filectx) and needed
        # (reference counts, so finished annotations can be dropped early)
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                # decrement refcounts; free parent results no longer needed
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1090 1090
    def ancestors(self, followfirst=False):
        """yield ancestor filectxs, highest (linkrev, filenode) first

        When followfirst is true, only the first parent of each revision
        is followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending candidate with the largest key
            c = visit.pop(max(visit))
            yield c
1106 1106
1107 1107 def decodeddata(self):
1108 1108 """Returns `data()` after running repository decoding filters.
1109 1109
1110 1110 This is often equivalent to how the data would be expressed on disk.
1111 1111 """
1112 1112 return self._repo.wwritedata(self.path(), self.data())
1113 1113
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable per-line annotation record used by basefilectx.annotate."""
    # filectx (or revision marker) blamed for this line
    fctx = attr.ib()
    # 1-based line number at first appearance, or False when not tracked
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1120 1120
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    `parents` and `child` are (annotateline list, text) pairs as produced by
    annotate's decorate(); the (possibly mutated) `child` pair is returned.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only replace lines still blamed on the child
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1181 1181
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be given; the
        remaining attributes are filled in lazily via propertycaches.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw revision data, including any revlog metadata/flags processing
        # skipped (raw=True)
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return file content, honoring the censor policy

        Raises Abort for censored nodes unless censor.policy is 'ignore',
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as recorded by the filelog (may include metadata adjustment)
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either changeset parent already has this exact file revision,
        # the rename does not belong to this changeset
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """
        :text: commit message
        :user: username, or None to resolve lazily from ui
        :date: any date accepted by util.parsedate, or None for "now"
        :extra: extra commit metadata dict (copied; 'branch' filled in)
        :changes: a precomputed status, or None to compute lazily
        """
        self._repo = repo
        # not committed yet: no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # rendered as "<p1>+" to mark uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # dirstate-backed flag lookup, with _buildflagfunc as fallback
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazily computed repository status (when 'changes' was not given)
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all files touched relative to the parent, sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # an uncommitted context is at least draft, and never less public
        # than any of its parents
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """return file flags ('l', 'x' or '') for path, '' when unknown"""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then the rest of the changelog ancestry
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1488 1488
1489 1489 class workingctx(committablectx):
1490 1490 """A workingctx object makes access to data related to
1491 1491 the current working directory convenient.
1492 1492 date - any valid date string or (unixtime, offset), or None.
1493 1493 user - username string, or None.
1494 1494 extra - a dictionary of extra values, or None.
1495 1495 changes - a list of file lists as returned by localrepo.status()
1496 1496 or None to use the repository status.
1497 1497 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # workingctx adds no state of its own; defer to committablectx
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1501 1501
    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f
1507 1507
    def __contains__(self, key):
        # tracked and not removed: dirstate says neither '?' nor 'r'
        return self._repo.dirstate[key] not in "?r"
1510 1510
    def hex(self):
        # the working directory has a fixed pseudo-node id
        return hex(wdirid)
1513 1513
    @propertycache
    def _parents(self):
        # dirstate parents; drop the second entry when it is null
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1520 1520
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1525 1525
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        :missing: also count locally deleted (but tracked) files
        :merge: count an in-progress merge (second parent) as dirty
        :branch: count a branch change relative to p1 as dirty
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1537 1537
    def add(self, list, prefix=""):
        """schedule files for addition; return the rejected ones

        `list` (name kept for API compatibility despite shadowing the
        builtin) holds repo-relative paths; `prefix` is prepended for
        user-facing messages and portability checks.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still allow, very large files
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified or normal (tracked)
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # was removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1572 1572
    def forget(self, files, prefix=""):
        """stop tracking files without deleting them; return rejected names"""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # tracked in a parent: mark removed
                    self._repo.dirstate.remove(f)
                else:
                    # pending add: simply drop the record
                    self._repo.dirstate.drop(f)
            return rejected
1587 1587
    def undelete(self, list):
        """restore removed files from a parent and mark them normal"""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # prefer p1's copy of the file, fall back to p2
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1600 1600
    def copy(self, source, dest):
        """record that dest is a copy of source in the dirstate

        dest must already exist as a regular file or symlink; otherwise a
        warning is emitted and nothing is recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1621 1621
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """build a matcher for working-directory files"""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1633 1633
    def flushall(self):
        # no-op: real working contexts are always "flushed" to disk
        pass # For overlayworkingfilectx compatibility.
1636 1636
    def _filtersuspectsymlink(self, files):
        """drop symlink-flagged files whose content cannot be a symlink

        Only relevant when the filesystem does not support symlinks
        (dirstate._checklink is false); otherwise files is returned as-is.
        """
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1655 1655
    def _checklookup(self, files):
        """recheck possibly-clean files against p1 by full content compare

        Returns (modified, deleted, fixup): files that actually changed,
        files that vanished meanwhile, and files proven clean whose
        dirstate entries can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1685 1685
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status hooks. Dirstate
        writing is best-effort: it is skipped when the wlock cannot be
        taken or when the dirstate changed underneath us (issue5584).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1725 1725
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds files the dirstate could not decide on; recheck them
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1756 1756
1757 1757 @propertycache
1758 1758 def _manifest(self):
1759 1759 """generate a manifest corresponding to the values in self._status
1760 1760
1761 1761 This reuse the file nodeid from parent, but we use special node
1762 1762 identifiers for added and modified files. This is used by manifests
1763 1763 merge to see that files are different and by update logic to avoid
1764 1764 deleting newly added files.
1765 1765 """
1766 1766 return self._buildstatusmanifest(self._status)
1767 1767
1768 1768 def _buildstatusmanifest(self, status):
1769 1769 """Builds a manifest that includes the given status results."""
1770 1770 parents = self.parents()
1771 1771
1772 1772 man = parents[0].manifest().copy()
1773 1773
1774 1774 ff = self._flagfunc
1775 1775 for i, l in ((addednodeid, status.added),
1776 1776 (modifiednodeid, status.modified)):
1777 1777 for f in l:
1778 1778 man[f] = i
1779 1779 try:
1780 1780 man.setflag(f, ff(f))
1781 1781 except OSError:
1782 1782 pass
1783 1783
1784 1784 for f in status.deleted + status.removed:
1785 1785 if f in man:
1786 1786 del man[f]
1787 1787
1788 1788 return man
1789 1789
1790 1790 def _buildstatus(self, other, s, match, listignored, listclean,
1791 1791 listunknown):
1792 1792 """build a status with respect to another context
1793 1793
1794 1794 This includes logic for maintaining the fast path of status when
1795 1795 comparing the working directory against its parent, which is to skip
1796 1796 building a new manifest if self (working directory) is not comparing
1797 1797 against its parent (repo['.']).
1798 1798 """
1799 1799 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1800 1800 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1801 1801 # might have accidentally ended up with the entire contents of the file
1802 1802 # they are supposed to be linking to.
1803 1803 s.modified[:] = self._filtersuspectsymlink(s.modified)
1804 1804 if other != self._repo['.']:
1805 1805 s = super(workingctx, self)._buildstatus(other, s, match,
1806 1806 listignored, listclean,
1807 1807 listunknown)
1808 1808 return s
1809 1809
1810 1810 def _matchstatus(self, other, match):
1811 1811 """override the match method with a filter for directory patterns
1812 1812
1813 1813 We use inheritance to customize the match.bad method only in cases of
1814 1814 workingctx since it belongs only to the working directory when
1815 1815 comparing against the parent changeset.
1816 1816
1817 1817 If we aren't comparing against the working directory's parent, then we
1818 1818 just use the default match object sent to us.
1819 1819 """
1820 1820 if other != self._repo['.']:
1821 1821 def bad(f, msg):
1822 1822 # 'f' may be a directory pattern from 'match.files()',
1823 1823 # so 'f not in ctx1' is not enough
1824 1824 if f not in other and not other.hasdir(f):
1825 1825 self._repo.ui.warn('%s: %s\n' %
1826 1826 (self._repo.dirstate.pathto(f), msg))
1827 1827 match.bad = bad
1828 1828 return match
1829 1829
    def markcommitted(self, node):
        # run the base-class bookkeeping for the new commit first...
        super(workingctx, self).markcommitted(node)

        # ...then let the sparse module react to the newly committed node
        sparse.aftercommit(self._repo, node)
1834 1834
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # filelog/ctx are optional; when omitted, subclasses typically
        # provide them lazily (e.g. via propertycache)
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context is always truthy
        return True

    __bool__ = __nonzero__  # Python 3 truth protocol

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "absent from this manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # renamed() yields (source path, source filenode); the filelog
            # for the copy source is unknown here, hence the trailing None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # skip parents in which the file does not exist (nullid filenode)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # uncommitted files have no committed children
        return []
1881 1881
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh workingctx when none was supplied to __init__
        return workingctx(self._repo)

    def data(self):
        # file content as read through the repo (repo.wread)
        return self._repo.wread(self._path)
    def renamed(self):
        # (copy source, source filenode in first parent) or None if not
        # recorded as a copy in the dirstate
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # working-copy mtime when the file exists; the ctx date otherwise
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # NOTE: contrast with lexists() below; wvfs.exists presumably
        # follows symlinks while wvfs.lexists does not
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # path sanity check, delegated to the working vfs
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only for files tracked as normal/added/merged in the dirstate
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory occupies our path; remove it entirely
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            # a file or symlink sitting on one of our parent directories
            # also conflicts; removing the deepest one is enough
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
1959 1959
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be flushed
    at a later time.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """Return the content of ``path``, preferring the cached version.

        Raises ProgrammingError if the file was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # bugfix: was "self._path", which is never set on this
                # (changectx-level) class; report the requested path
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # bugfix: was "self._path" (undefined here); use the argument
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # bugfix: was "self._path" (undefined here); use the argument
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """Write every cached change through to the wrapped context, in the
        order the paths were first dirtied, then reset the cache."""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    # flags-only change
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): filectx.remove() takes ``ignoremissing``, not
                # a path -- passing ``path`` makes that flag truthy. Looks
                # unintended; confirm before changing behavior.
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        # a path is "dirty" once any write/remove/setflags touched it
        return path in self._cache

    def _clean(self):
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # remember first-touch order so flushall() replays writes in order
        if path not in self._cache:
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2095 2095
class overlayworkingfilectx(workingfilectx):
    """Wrap a ``workingfilectx``, intercepting all writes into the in-memory
    cache of its parent ``overlayworkingctx``; the cache can be flushed
    through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ
        return self.data() != fctx.data()

    def ctx(self):
        return self._parent

    # --- reads delegated to the parent overlay context -------------------

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # nothing to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    # --- writes intercepted into the parent's cache ----------------------

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)
2150 2150
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately bypasses workingctx.__init__ and calls its base
        # class directly, passing the precomputed ``changes`` status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # every tracked file not being committed is reported clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2186 2186
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes its result per path.

    util.cachefunc cannot be used here: it keys the cache on every argument,
    and the repo/memctx arguments would create a reference cycle.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
2202 2202
def memfilefromctx(ctx):
    """Return a filectxfn that serves memfilectx objects out of ``ctx[path]``.

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a tuple, but apparently only one parent is kept
        # track of here (why not store just the path instead of a tuple?)
        copysource = fctx.renamed()
        if copysource:
            copysource = copysource[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2221 2221
def memfilefrompatch(patchstore):
    """Return a filectxfn backed by a patch (e.g. patchstore) object.

    This is a convenience method for building a memctx based on a patchstore;
    yields None for paths the patch removed.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # file deleted by the patch
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx
2237 2237
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # None parents are normalized to nullid; exactly two are expected
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patchstore or another context in place of a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # hash new content against the parent filenodes so manifests
            # comparing this commit see the file as changed
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not tracked by either parent: a brand-new file
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: file exists -> modified
                modified.append(f)
            else:
                # filectxfn returned None: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2360 2360
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # flags string: 'l' marks a symlink, 'x' an executable file
        self._flags = ('l' if islink else '') + ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2393 2393
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            # ctx was overridden; only "matches" if it equals the original's
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # lazily evaluated: either the override or originalfctx.data
        return self._datafunc()
2464 2464
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # pad with null contexts so we always have exactly two parents
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects, so ``p1 != nullid``
        # (bytes) is presumably always true; the guards may have been meant
        # to compare p1.node() -- confirm before relying on them.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the manifest reused from the original ctx
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged; delegate to the original context
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2562 2562
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s.

        (This is the opposite convention from filecmp.cmp, which returns
        True when the files are the same.)
        """
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no symlink/exec flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # raw bytes straight from disk; no filters are applied
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        # flags are unsupported for arbitrary files
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now