##// END OF EJS Templates
context: avoid writing outdated dirstate out (issue5584)...
FUJIWARA Katsunori -
r32752:dc7efa28 default
parent child Browse files
Show More
@@ -1,2354 +1,2365
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 subrepo,
42 42 util,
43 43 )
44 44
45 45 propertycache = util.propertycache
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged, so
        # "repo[ctx]" is effectively a no-op; otherwise allocate a fresh
        # object pre-seeded with the null revision/node.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        # Short hex of the node; on py3 str must be unicode, hence decode.
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # for the same revision; AttributeError covers non-context others.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership is file-path membership in this context's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # ctx['path'] yields a filectx for that path in this context.
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # manifest diff reports None for clean entries
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo path -> (source, revision, kind), parsed from .hgsubstate
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    # -- trivial accessors ------------------------------------------------
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything above 'public' (draft, secret) may still be rewritten
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null changectx when there is only one
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for path, raising ManifestLookupError
        if the path is absent from this context's manifest."""
        if r'_manifest' in self.__dict__:
            # full manifest already loaded: cheapest path
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            # a delta against p1 may already contain the answer without
            # forcing a full manifest read
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files simply have no flags
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher scoped to this repository and context."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
387 387
388 388
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build a memctx whose file contents come from ``store``.

    ``store`` must provide ``getfile(path)`` returning
    ``(data, (islink, isexec), copied)``; data of None marks a removed
    file.  When ``branch`` is given it is recorded (repo-encoded) in the
    commit extras.  Note: a caller-supplied ``extra`` dict is updated in
    place in that case.
    """
    def getfilectx(repo, memctx, path):
        # Adapt one store entry to a memfilectx; None data means "absent".
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
405 405
def _filterederror(repo, changeid):
    """Return (not raise) an exception describing a filtered changeid.

    Kept as a standalone helper so extensions (eg: evolve) can wrap it to
    experiment with alternative message wording.
    """
    # The 'visible*' filters hide obsolete changesets, which --hidden can
    # reveal; other filters just report the excluding subset.
    if repo.filtername.startswith('visible'):
        return error.FilteredRepoLookupError(
            _("hidden revision '%s'") % changeid,
            hint=_('use --hidden to access hidden revisions'))
    msg = _("filtered revision '%s' (not in '%s' subset)") % (
        changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
418 418
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The lookup cascade below tries, in order: int rev, the symbolic
        # names 'null'/'tip'/'.', binary node, decimal rev string, hex node,
        # names (bookmarks/tags/branches), and finally unambiguous node
        # prefix.  Filtered-access errors are re-raised eagerly at each step
        # and translated at the bottom.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                # py2 only: normalize long to its decimal string form
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # decimal revision number, possibly negative (from tip)
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    # binary node: hexlify so the final error is printable
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # hash by revision when available; fall back to object identity
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # manifest as a delta against p1; cheaper than a full read
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        # legacy tuple form of the changelog entry
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    # -- changelog-entry accessors ---------------------------------------
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # lazily yield every ancestor changectx (excluding self)
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
679 679
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # the revlog holding this file's history
        return self._repo.file(self._path)
692 692
    @propertycache
    def _changeid(self):
        """Changelog revision this file context belongs to.

        Resolution order: an explicitly supplied _changeid, the attached
        changectx, a known descendant (triggering lazy linkrev
        adjustment), and finally the raw filelog linkrev.
        """
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
705 705
    @propertycache
    def _filenode(self):
        # prefer an explicitly supplied file id; otherwise ask the
        # attached changectx's manifest
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # revision number of this file node within its filelog
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
720 720
    def __nonzero__(self):
        # a file context is truthy iff its file node can be resolved
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __str__(self):
        # "path@shortnode"; "???" when the changeset cannot be resolved
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()
736 736
    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash by (path, filenode); fall back to identity when unresolved
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
755 755
    # -- trivial accessors, mostly delegating to the attached changectx --
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw filelog linkrev; may be an alias, see introrev()
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path
804 804
    def isbinary(self):
        # IOError (e.g. unreadable data) is treated as "not binary"
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
821 821
    # subclasses with their own comparison semantics set this to True so
    # cmp() defers to them
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only fall through to a content comparison when the sizes could
        # plausibly match; otherwise report "different" without reading data.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
840 840
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
886 886
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # no associated changeset: the raw linkrev is the best we can do
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
902 902
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
918 918
    def parents(self):
        """Return parent filectxs, substituting rename source for a null
        parent when this file revision records a copy."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
938 938
939 939 def p1(self):
940 940 return self.parents()[0]
941 941
942 942 def p2(self):
943 943 p = self.parents()
944 944 if len(p) == 2:
945 945 return p[1]
946 946 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
947 947
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # number of lines in `text`; a non-empty text without a trailing
            # newline still counts as one (partial) line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                # tag each line with (rev, 1-based line number)
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                # tag each line with (rev, False) when numbers are not wanted
                return ([(rev, False)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> list of parent filectxs
        # needed: reference count so pass 2 can release annotations early
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                # drop parent annotations that are no longer referenced,
                # bounding memory use
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

            hist[f] = curr
            del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1060 1060
1061 1061 def ancestors(self, followfirst=False):
1062 1062 visit = {}
1063 1063 c = self
1064 1064 if followfirst:
1065 1065 cut = 1
1066 1066 else:
1067 1067 cut = None
1068 1068
1069 1069 while True:
1070 1070 for parent in c.parents()[:cut]:
1071 1071 visit[(parent.linkrev(), parent.filenode())] = parent
1072 1072 if not visit:
1073 1073 break
1074 1074 c = visit.pop(max(visit))
1075 1075 yield c
1076 1076
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    Returns the (possibly mutated) `child` annotation pair.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1186 1186
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """raw revlog data for this file revision (flags unprocessed)"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """file content for this revision; honors the censor policy"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """size of this file revision as stored in the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either changeset parent already has the same file revision,
        # the "copy" is not interesting for this changeset
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """filectxs for the children of this file revision"""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1292 1292
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return a pair `(diffinrange, linerange1)`.

    `diffinrange` is True if the diff from fctx2 to fctx1 has changes
    within `linerange2`, and `linerange1` is the corresponding line range
    for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # only '!' (changed) blocks count as actual changes in range
    changed = False
    for _block, stype in inrangeblocks:
        if stype == '!':
            changed = True
            break
    return changed, linerange1
1302 1302
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each generated item is a `(filectx, (fromline, toline))` pair giving the
    location of the block in that ancestor. With followfirst=True only first
    parents are followed.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        # rebind fctx to the changeset that actually introduced it
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process pending contexts newest-first
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
1337 1337
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each generated item is a `(filectx, (fromline, toline))` pair giving the
    location of the block in that descendant.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # seen maps a filelog revision to its filectx and mapped line range
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
1375 1375
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # date/user/status are lazily computed (propertycache) unless given
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + r"+"

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin "now" to a fixed value
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """sorted list of files touched by this context"""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # the union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # an uncommitted context is at least as drafty as its draftiest parent
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """flags ('l', 'x' or '') for path; '' when the file is unknown"""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then all their ancestors from the changelog
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1573 1573
1574 1574 class workingctx(committablectx):
1575 1575 """A workingctx object makes access to data related to
1576 1576 the current working directory convenient.
1577 1577 date - any valid date string or (unixtime, offset), or None.
1578 1578 user - username string, or None.
1579 1579 extra - a dictionary of extra values, or None.
1580 1580 changes - a list of file lists as returned by localrepo.status()
1581 1581 or None to use the repository status.
1582 1582 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all state handling is inherited from committablectx
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1586 1586
1587 1587 def __iter__(self):
1588 1588 d = self._repo.dirstate
1589 1589 for f in d:
1590 1590 if d[f] != 'r':
1591 1591 yield f
1592 1592
1593 1593 def __contains__(self, key):
1594 1594 return self._repo.dirstate[key] not in "?r"
1595 1595
    def hex(self):
        # the working directory is identified by the fixed wdir node id
        return hex(wdirid)
1598 1598
1599 1599 @propertycache
1600 1600 def _parents(self):
1601 1601 p = self._repo.dirstate.parents()
1602 1602 if p[1] == nullid:
1603 1603 p = p[:-1]
1604 1604 return [changectx(self._repo, x) for x in p]
1605 1605
1606 1606 def filectx(self, path, filelog=None):
1607 1607 """get a file context from the working directory"""
1608 1608 return workingfilectx(self._repo, path, workingctx=self,
1609 1609 filelog=filelog)
1610 1610
1611 1611 def dirty(self, missing=False, merge=True, branch=True):
1612 1612 "check whether a working directory is modified"
1613 1613 # check subrepos first
1614 1614 for s in sorted(self.substate):
1615 1615 if self.sub(s).dirty():
1616 1616 return True
1617 1617 # check current working dir
1618 1618 return ((merge and self.p2()) or
1619 1619 (branch and self.branch() != self.p1().branch()) or
1620 1620 self.modified() or self.added() or self.removed() or
1621 1621 (missing and self.deleted()))
1622 1622
1623 1623 def add(self, list, prefix=""):
1624 1624 join = lambda f: os.path.join(prefix, f)
1625 1625 with self._repo.wlock():
1626 1626 ui, ds = self._repo.ui, self._repo.dirstate
1627 1627 rejected = []
1628 1628 lstat = self._repo.wvfs.lstat
1629 1629 for f in list:
1630 1630 scmutil.checkportable(ui, join(f))
1631 1631 try:
1632 1632 st = lstat(f)
1633 1633 except OSError:
1634 1634 ui.warn(_("%s does not exist!\n") % join(f))
1635 1635 rejected.append(f)
1636 1636 continue
1637 1637 if st.st_size > 10000000:
1638 1638 ui.warn(_("%s: up to %d MB of RAM may be required "
1639 1639 "to manage this file\n"
1640 1640 "(use 'hg revert %s' to cancel the "
1641 1641 "pending addition)\n")
1642 1642 % (f, 3 * st.st_size // 1000000, join(f)))
1643 1643 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1644 1644 ui.warn(_("%s not added: only files and symlinks "
1645 1645 "supported currently\n") % join(f))
1646 1646 rejected.append(f)
1647 1647 elif ds[f] in 'amn':
1648 1648 ui.warn(_("%s already tracked!\n") % join(f))
1649 1649 elif ds[f] == 'r':
1650 1650 ds.normallookup(f)
1651 1651 else:
1652 1652 ds.add(f)
1653 1653 return rejected
1654 1654
1655 1655 def forget(self, files, prefix=""):
1656 1656 join = lambda f: os.path.join(prefix, f)
1657 1657 with self._repo.wlock():
1658 1658 rejected = []
1659 1659 for f in files:
1660 1660 if f not in self._repo.dirstate:
1661 1661 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1662 1662 rejected.append(f)
1663 1663 elif self._repo.dirstate[f] != 'a':
1664 1664 self._repo.dirstate.remove(f)
1665 1665 else:
1666 1666 self._repo.dirstate.drop(f)
1667 1667 return rejected
1668 1668
1669 1669 def undelete(self, list):
1670 1670 pctxs = self.parents()
1671 1671 with self._repo.wlock():
1672 1672 for f in list:
1673 1673 if self._repo.dirstate[f] != 'r':
1674 1674 self._repo.ui.warn(_("%s not removed!\n") % f)
1675 1675 else:
1676 1676 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1677 1677 t = fctx.data()
1678 1678 self._repo.wwrite(f, t, fctx.flags())
1679 1679 self._repo.dirstate.normal(f)
1680 1680
1681 1681 def copy(self, source, dest):
1682 1682 try:
1683 1683 st = self._repo.wvfs.lstat(dest)
1684 1684 except OSError as err:
1685 1685 if err.errno != errno.ENOENT:
1686 1686 raise
1687 1687 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1688 1688 return
1689 1689 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1690 1690 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1691 1691 "symbolic link\n") % dest)
1692 1692 else:
1693 1693 with self._repo.wlock():
1694 1694 if self._repo.dirstate[dest] in '?':
1695 1695 self._repo.dirstate.add(dest)
1696 1696 elif self._repo.dirstate[dest] in 'r':
1697 1697 self._repo.dirstate.normallookup(dest)
1698 1698 self._repo.dirstate.copy(source, dest)
1699 1699
1700 1700 def match(self, pats=None, include=None, exclude=None, default='glob',
1701 1701 listsubrepos=False, badfn=None):
1702 1702 r = self._repo
1703 1703
1704 1704 # Only a case insensitive filesystem needs magic to translate user input
1705 1705 # to actual case in the filesystem.
1706 1706 icasefs = not util.fscasesensitive(r.root)
1707 1707 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1708 1708 default, auditor=r.auditor, ctx=self,
1709 1709 listsubrepos=listsubrepos, badfn=badfn,
1710 1710 icasefs=icasefs)
1711 1711
1712 1712 def _filtersuspectsymlink(self, files):
1713 1713 if not files or self._repo.dirstate._checklink:
1714 1714 return files
1715 1715
1716 1716 # Symlink placeholders may get non-symlink-like contents
1717 1717 # via user error or dereferencing by NFS or Samba servers,
1718 1718 # so we filter out any placeholders that don't look like a
1719 1719 # symlink
1720 1720 sane = []
1721 1721 for f in files:
1722 1722 if self.flags(f) == 'l':
1723 1723 d = self[f].data()
1724 1724 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1725 1725 self._repo.ui.debug('ignoring suspect symlink placeholder'
1726 1726 ' "%s"\n' % f)
1727 1727 continue
1728 1728 sane.append(f)
1729 1729 return sane
1730 1730
    def _checklookup(self, files):
        """Recheck files whose dirstate entries were ambiguous ("lookup").

        Returns a (modified, deleted, fixup) triple of lists: files whose
        content really changed, files that became inaccessible meanwhile,
        and files that turned out to be clean (whose dirstate entries are
        refreshed as a side effect when possible).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # remember what the dirstate looked like before taking the
                # lock, so we can detect concurrent modification below
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        normal = self._repo.dirstate.normal
                        for f in fixup:
                            normal(f)
                        # write changes out explicitly, because nesting
                        # wlock at runtime may prevent 'wlock.release()'
                        # after this block from doing so for subsequent
                        # changing files
                        tr = self._repo.currenttransaction()
                        self._repo.dirstate.write(tr)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
        return modified, deleted, fixup
1778 1789
1779 1790 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1780 1791 unknown=False):
1781 1792 '''Gets the status from the dirstate -- internal use only.'''
1782 1793 listignored, listclean, listunknown = ignored, clean, unknown
1783 1794 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1784 1795 subrepos = []
1785 1796 if '.hgsub' in self:
1786 1797 subrepos = sorted(self.substate)
1787 1798 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1788 1799 listclean, listunknown)
1789 1800
1790 1801 # check for any possibly clean files
1791 1802 if cmp:
1792 1803 modified2, deleted2, fixup = self._checklookup(cmp)
1793 1804 s.modified.extend(modified2)
1794 1805 s.deleted.extend(deleted2)
1795 1806
1796 1807 # update dirstate for files that are actually clean
1797 1808 if fixup and listclean:
1798 1809 s.clean.extend(fixup)
1799 1810
1800 1811 if match.always():
1801 1812 # cache for performance
1802 1813 if s.unknown or s.ignored or s.clean:
1803 1814 # "_status" is cached with list*=False in the normal route
1804 1815 self._status = scmutil.status(s.modified, s.added, s.removed,
1805 1816 s.deleted, [], [], [])
1806 1817 else:
1807 1818 self._status = s
1808 1819
1809 1820 return s
1810 1821
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1821 1832
1822 1833 def _buildstatusmanifest(self, status):
1823 1834 """Builds a manifest that includes the given status results."""
1824 1835 parents = self.parents()
1825 1836
1826 1837 man = parents[0].manifest().copy()
1827 1838
1828 1839 ff = self._flagfunc
1829 1840 for i, l in ((addednodeid, status.added),
1830 1841 (modifiednodeid, status.modified)):
1831 1842 for f in l:
1832 1843 man[f] = i
1833 1844 try:
1834 1845 man.setflag(f, ff(f))
1835 1846 except OSError:
1836 1847 pass
1837 1848
1838 1849 for f in status.deleted + status.removed:
1839 1850 if f in man:
1840 1851 del man[f]
1841 1852
1842 1853 return man
1843 1854
1844 1855 def _buildstatus(self, other, s, match, listignored, listclean,
1845 1856 listunknown):
1846 1857 """build a status with respect to another context
1847 1858
1848 1859 This includes logic for maintaining the fast path of status when
1849 1860 comparing the working directory against its parent, which is to skip
1850 1861 building a new manifest if self (working directory) is not comparing
1851 1862 against its parent (repo['.']).
1852 1863 """
1853 1864 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1854 1865 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1855 1866 # might have accidentally ended up with the entire contents of the file
1856 1867 # they are supposed to be linking to.
1857 1868 s.modified[:] = self._filtersuspectsymlink(s.modified)
1858 1869 if other != self._repo['.']:
1859 1870 s = super(workingctx, self)._buildstatus(other, s, match,
1860 1871 listignored, listclean,
1861 1872 listunknown)
1862 1873 return s
1863 1874
1864 1875 def _matchstatus(self, other, match):
1865 1876 """override the match method with a filter for directory patterns
1866 1877
1867 1878 We use inheritance to customize the match.bad method only in cases of
1868 1879 workingctx since it belongs only to the working directory when
1869 1880 comparing against the parent changeset.
1870 1881
1871 1882 If we aren't comparing against the working directory's parent, then we
1872 1883 just use the default match object sent to us.
1873 1884 """
1874 1885 superself = super(workingctx, self)
1875 1886 match = superself._matchstatus(other, match)
1876 1887 if other != self._repo['.']:
1877 1888 def bad(f, msg):
1878 1889 # 'f' may be a directory pattern from 'match.files()',
1879 1890 # so 'f not in ctx1' is not enough
1880 1891 if f not in other and not other.hasdir(f):
1881 1892 self._repo.ui.warn('%s: %s\n' %
1882 1893 (self._repo.dirstate.pathto(f), msg))
1883 1894 match.bad = bad
1884 1895 return match
1885 1896
class committablefilectx(basefilectx):
    """Common functionality for a file context that can be committed.

    Shared base of workingfilectx and memfilectx.
    """
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        # only pin filelog/changectx when explicitly provided; subclasses
        # may otherwise compute them lazily (e.g. via propertycache)
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodeof(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy source: (path, filenode) plus "look up the filelog" marker
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodeof(parentctxs[0], path), filelog)]
        for pctx in parentctxs[1:]:
            entries.append((path, nodeof(pctx, path), filelog))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        return []
1932 1943
class workingfilectx(committablefilectx):
    """Convenient access to data for a particular file in the working
    directory."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        p1 = self._changectx._parents[0]
        return source, p1._manifest.get(source, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path).st_mtime
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx date
            return (t, tz)
        return (mtime, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1978 1989
class workingcommitctx(workingctx):
    """Convenient access to data for the revision being committed.

    Working-directory changes that are not part of this commit are
    hidden by this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: super(workingctx, ...) deliberately starts the MRO *after*
        # workingctx, skipping workingctx's own __init__
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        s = self._status
        return scmutil.status([f for f in s.modified if match(f)],
                              [f for f in s.added if match(f)],
                              [f for f in s.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
2016 2027
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2032 2043
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        # de-duplicate and order the list of touched files
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # modified files get a fresh filenode hashed from their new content
        # and their parent filenode(s)
        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        # added files have no parent filenodes
        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # a file unknown to both parents is "added"; a known file with a
        # non-None filectx is "modified"; a known file whose filectx is
        # None is "removed"
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2161 2172
class memfilectx(committablefilectx):
    """An in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2194 2205
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            # lazy: only compare contexts if the "reusable" test needs it
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazily evaluate the (possibly overridden) data function
        return self._datafunc()
2265 2276
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # missing parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        #
        # note: p1/p2 are changectx objects, so compare their *nodes*
        # against nullid (a changectx never equals a raw node, making
        # "p1 != nullid" unconditionally true); this matches the
        # "p2.node() != nullid" idiom used in _status below
        mp1, mp2 = self.manifestctx().parents
        if p1.node() != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2.node() != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data is served straight from the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # a file unknown to both parents is "added"; a known file with a
        # non-None filectx is "modified"; a known file whose filectx is
        # None is "removed"
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,97 +1,159
1 1 $ hg init repo
2 2 $ cd repo
3 3 $ echo a > a
4 4 $ hg add a
5 5 $ hg commit -m test
6 6
7 7 Do we ever miss a sub-second change?:
8 8
9 9 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
10 10 > hg co -qC 0
11 11 > echo b > a
12 12 > hg st
13 13 > done
14 14 M a
15 15 M a
16 16 M a
17 17 M a
18 18 M a
19 19 M a
20 20 M a
21 21 M a
22 22 M a
23 23 M a
24 24 M a
25 25 M a
26 26 M a
27 27 M a
28 28 M a
29 29 M a
30 30 M a
31 31 M a
32 32 M a
33 33 M a
34 34
35 35 $ echo test > b
36 36 $ mkdir dir1
37 37 $ echo test > dir1/c
38 38 $ echo test > d
39 39
40 40 $ echo test > e
41 41 #if execbit
42 42 A directory will typically have the execute bit -- make sure it doesn't get
43 43 confused with a file with the exec bit set
44 44 $ chmod +x e
45 45 #endif
46 46
47 47 $ hg add b dir1 d e
48 48 adding dir1/c (glob)
49 49 $ hg commit -m test2
50 50
51 51 $ cat >> $TESTTMP/dirstaterace.py << EOF
52 52 > from mercurial import (
53 53 > context,
54 54 > extensions,
55 55 > )
56 56 > def extsetup():
57 57 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
58 58 > def overridechecklookup(orig, self, files):
59 59 > # make an update that changes the dirstate from underneath
60 60 > self._repo.ui.system(r"sh '$TESTTMP/dirstaterace.sh'",
61 61 > cwd=self._repo.root)
62 62 > return orig(self, files)
63 63 > EOF
64 64
65 65 $ hg debugrebuilddirstate
66 66 $ hg debugdirstate
67 67 n 0 -1 unset a
68 68 n 0 -1 unset b
69 69 n 0 -1 unset d
70 70 n 0 -1 unset dir1/c
71 71 n 0 -1 unset e
72 72
73 73 XXX Note that this returns M for files that got replaced by directories. This is
74 74 definitely a bug, but the fix for that is hard and the next status run is fine
75 75 anyway.
76 76
77 77 $ cat > $TESTTMP/dirstaterace.sh <<EOF
78 78 > rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e
79 79 > EOF
80 80
81 81 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py
82 82 M d
83 83 M e
84 84 ! b
85 85 ! dir1/c
86 86 $ hg debugdirstate
87 87 n 644 2 * a (glob)
88 88 n 0 -1 unset b
89 89 n 0 -1 unset d
90 90 n 0 -1 unset dir1/c
91 91 n 0 -1 unset e
92 92
93 93 $ hg status
94 94 ! b
95 95 ! d
96 96 ! dir1/c
97 97 ! e
98
99 $ rmdir d e
100 $ hg update -C -q .
101
102 Test that dirstate changes aren't written out at the end of "hg
103 status", if .hg/dirstate is already changed simultaneously before
104 acquisition of wlock in workingctx._checklookup().
105
106 This avoidance is important to keep consistency of dirstate in race
107 condition (see issue5584 for detail).
108
109 $ hg parents -q
110 1:* (glob)
111
112 $ hg debugrebuilddirstate
113 $ hg debugdirstate
114 n 0 -1 unset a
115 n 0 -1 unset b
116 n 0 -1 unset d
117 n 0 -1 unset dir1/c
118 n 0 -1 unset e
119
120 $ cat > $TESTTMP/dirstaterace.sh <<EOF
121 > # This script assumes timetable of typical issue5584 case below:
122 > #
123 > # 1. "hg status" loads .hg/dirstate
124 > # 2. "hg status" confirms clean-ness of FILE
125 > # 3. "hg update -C 0" updates the working directory simultaneously
126 > # (FILE is removed, and FILE is dropped from .hg/dirstate)
127 > # 4. "hg status" acquires wlock
128 > # (.hg/dirstate is re-loaded = no FILE entry in dirstate)
129 > # 5. "hg status" marks FILE in dirstate as clean
130 > # (FILE entry is added to in-memory dirstate)
131 > # 6. "hg status" writes dirstate changes into .hg/dirstate
132 > # (FILE entry is written into .hg/dirstate)
133 > #
134 > # To reproduce similar situation easily and certainly, #2 and #3
135 > # are swapped. "hg cat" below ensures #2 on "hg status" side.
136 >
137 > hg update -q -C 0
138 > hg cat -r 1 b > b
139 > EOF
140
141 "hg status" below should excludes "e", of which exec flag is set, for
142 portability of test scenario, because unsure but missing "e" is
143 treated differently in _checklookup() according to runtime platform.
144
145 - "missing(!)" on POSIX, "pctx[f].cmp(self[f])" raises ENOENT
146 - "modified(M)" on Windows, "self.flags(f) != pctx.flags(f)" is True
147
148 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py --debug -X path:e
149 skip updating dirstate: identity mismatch
150 M a
151 ! d
152 ! dir1/c
153
154 $ hg parents -q
155 0:* (glob)
156 $ hg files
157 a
158 $ hg debugdirstate
159 n * * * a (glob)
General Comments 0
You need to be logged in to leave comments. Login now