status: don't crash if a lookup file disappears...
Siddharth Agarwal, r32651:c850f0ed, 4.2.1 stable
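This change hardens `workingctx._checklookup` against "lookup" files that vanish between the dirstate walk and the content comparison: instead of letting the resulting IOError/OSError propagate and crash `hg status`, the file is reported as deleted, matching dirstate behavior (issue5584). As part of the fix, `_checklookup` now returns a `(modified, deleted, fixup)` triple rather than a pair, and `_dirstatestatus` extends its deleted list with the extra results. The sketch below is a simplified, hypothetical illustration of that pattern; `checklookup`, `read_current`, and `read_parent` are stand-ins for the repo-specific helpers used by the real method, which also fixes up the dirstate for clean files.

def checklookup(files, read_current, read_parent):
    """Classify possibly-clean files, tolerating ones that disappear."""
    modified, deleted, fixup = [], [], []
    for f in sorted(files):
        try:
            # Reading the working copy may raise IOError/OSError if the
            # file vanished (or became unreadable) after the dirstate walk.
            if read_current(f) != read_parent(f):
                modified.append(f)
            else:
                fixup.append(f)  # actually clean; dirstate can be marked normal
        except (IOError, OSError):
            # The file became inaccessible in between: report it as deleted
            # instead of crashing, mirroring what the dirstate would do.
            deleted.append(f)
    return modified, deleted, fixup

Callers then fold all three lists into the status they build, as `_dirstatestatus` does below with `s.modified.extend(modified2)` and `s.deleted.extend(deleted2)`.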
@@ -1,2174 +1,2187
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 80 return "<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is none
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but it's ancestor are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset try to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successors of a changeset with multiple possible successors set
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changesets.
231 231
232 232 Troubles are returned as strings. Possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if '_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if '_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def dirty(self, missing=False, merge=True, branch=True):
327 327 return False
328 328
329 329 def status(self, other=None, match=None, listignored=False,
330 330 listclean=False, listunknown=False, listsubrepos=False):
331 331 """return status of files between two nodes or node and working
332 332 directory.
333 333
334 334 If other is None, compare this node with working directory.
335 335
336 336 returns (modified, added, removed, deleted, unknown, ignored, clean)
337 337 """
338 338
339 339 ctx1 = self
340 340 ctx2 = self._repo[other]
341 341
342 342 # This next code block is, admittedly, fragile logic that tests for
343 343 # reversing the contexts and wouldn't need to exist if it weren't for
344 344 # the fast (and common) code path of comparing the working directory
345 345 # with its first parent.
346 346 #
347 347 # What we're aiming for here is the ability to call:
348 348 #
349 349 # workingctx.status(parentctx)
350 350 #
351 351 # If we always built the manifest for each context and compared those,
352 352 # then we'd be done. But the special case of the above call means we
353 353 # just copy the manifest of the parent.
354 354 reversed = False
355 355 if (not isinstance(ctx1, changectx)
356 356 and isinstance(ctx2, changectx)):
357 357 reversed = True
358 358 ctx1, ctx2 = ctx2, ctx1
359 359
360 360 match = ctx2._matchstatus(ctx1, match)
361 361 r = scmutil.status([], [], [], [], [], [], [])
362 362 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
363 363 listunknown)
364 364
365 365 if reversed:
366 366 # Reverse added and removed. Clear deleted, unknown and ignored as
367 367 # these make no sense to reverse.
368 368 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
369 369 r.clean)
370 370
371 371 if listsubrepos:
372 372 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
373 373 try:
374 374 rev2 = ctx2.subrev(subpath)
375 375 except KeyError:
376 376 # A subrepo that existed in node1 was deleted between
377 377 # node1 and node2 (inclusive). Thus, ctx2's substate
378 378 # won't contain that subpath. The best we can do is ignore it.
379 379 rev2 = None
380 380 submatch = matchmod.subdirmatcher(subpath, match)
381 381 s = sub.status(rev2, match=submatch, ignored=listignored,
382 382 clean=listclean, unknown=listunknown,
383 383 listsubrepos=True)
384 384 for rfiles, sfiles in zip(r, s):
385 385 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
386 386
387 387 for l in r:
388 388 l.sort()
389 389
390 390 return r
391 391
392 392
393 393 def makememctx(repo, parents, text, user, date, branch, files, store,
394 394 editor=None, extra=None):
395 395 def getfilectx(repo, memctx, path):
396 396 data, mode, copied = store.getfile(path)
397 397 if data is None:
398 398 return None
399 399 islink, isexec = mode
400 400 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
401 401 copied=copied, memctx=memctx)
402 402 if extra is None:
403 403 extra = {}
404 404 if branch:
405 405 extra['branch'] = encoding.fromlocal(branch)
406 406 ctx = memctx(repo, parents, text, files, getfilectx, user,
407 407 date, extra, editor)
408 408 return ctx
409 409
410 410 def _filterederror(repo, changeid):
411 411 """build an exception to be raised about a filtered changeid
412 412
413 413 This is extracted in a function to help extensions (eg: evolve) to
414 414 experiment with various message variants."""
415 415 if repo.filtername.startswith('visible'):
416 416 msg = _("hidden revision '%s'") % changeid
417 417 hint = _('use --hidden to access hidden revisions')
418 418 return error.FilteredRepoLookupError(msg, hint=hint)
419 419 msg = _("filtered revision '%s' (not in '%s' subset)")
420 420 msg %= (changeid, repo.filtername)
421 421 return error.FilteredRepoLookupError(msg)
422 422
423 423 class changectx(basectx):
424 424 """A changecontext object makes access to data related to a particular
425 425 changeset convenient. It represents a read-only context already present in
426 426 the repo."""
427 427 def __init__(self, repo, changeid=''):
428 428 """changeid is a revision number, node, or tag"""
429 429
430 430 # since basectx.__new__ already took care of copying the object, we
431 431 # don't need to do anything in __init__, so we just exit here
432 432 if isinstance(changeid, basectx):
433 433 return
434 434
435 435 if changeid == '':
436 436 changeid = '.'
437 437 self._repo = repo
438 438
439 439 try:
440 440 if isinstance(changeid, int):
441 441 self._node = repo.changelog.node(changeid)
442 442 self._rev = changeid
443 443 return
444 444 if not pycompat.ispy3 and isinstance(changeid, long):
445 445 changeid = str(changeid)
446 446 if changeid == 'null':
447 447 self._node = nullid
448 448 self._rev = nullrev
449 449 return
450 450 if changeid == 'tip':
451 451 self._node = repo.changelog.tip()
452 452 self._rev = repo.changelog.rev(self._node)
453 453 return
454 454 if changeid == '.' or changeid == repo.dirstate.p1():
455 455 # this is a hack to delay/avoid loading obsmarkers
456 456 # when we know that '.' won't be hidden
457 457 self._node = repo.dirstate.p1()
458 458 self._rev = repo.unfiltered().changelog.rev(self._node)
459 459 return
460 460 if len(changeid) == 20:
461 461 try:
462 462 self._node = changeid
463 463 self._rev = repo.changelog.rev(changeid)
464 464 return
465 465 except error.FilteredRepoLookupError:
466 466 raise
467 467 except LookupError:
468 468 pass
469 469
470 470 try:
471 471 r = int(changeid)
472 472 if '%d' % r != changeid:
473 473 raise ValueError
474 474 l = len(repo.changelog)
475 475 if r < 0:
476 476 r += l
477 477 if r < 0 or r >= l:
478 478 raise ValueError
479 479 self._rev = r
480 480 self._node = repo.changelog.node(r)
481 481 return
482 482 except error.FilteredIndexError:
483 483 raise
484 484 except (ValueError, OverflowError, IndexError):
485 485 pass
486 486
487 487 if len(changeid) == 40:
488 488 try:
489 489 self._node = bin(changeid)
490 490 self._rev = repo.changelog.rev(self._node)
491 491 return
492 492 except error.FilteredLookupError:
493 493 raise
494 494 except (TypeError, LookupError):
495 495 pass
496 496
497 497 # lookup bookmarks through the name interface
498 498 try:
499 499 self._node = repo.names.singlenode(repo, changeid)
500 500 self._rev = repo.changelog.rev(self._node)
501 501 return
502 502 except KeyError:
503 503 pass
504 504 except error.FilteredRepoLookupError:
505 505 raise
506 506 except error.RepoLookupError:
507 507 pass
508 508
509 509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
510 510 if self._node is not None:
511 511 self._rev = repo.changelog.rev(self._node)
512 512 return
513 513
514 514 # lookup failed
515 515 # check if it might have come from damaged dirstate
516 516 #
517 517 # XXX we could avoid the unfiltered if we had a recognizable
518 518 # exception for filtered changeset access
519 519 if changeid in repo.unfiltered().dirstate.parents():
520 520 msg = _("working directory has unknown parent '%s'!")
521 521 raise error.Abort(msg % short(changeid))
522 522 try:
523 523 if len(changeid) == 20 and nonascii(changeid):
524 524 changeid = hex(changeid)
525 525 except TypeError:
526 526 pass
527 527 except (error.FilteredIndexError, error.FilteredLookupError,
528 528 error.FilteredRepoLookupError):
529 529 raise _filterederror(repo, changeid)
530 530 except IndexError:
531 531 pass
532 532 raise error.RepoLookupError(
533 533 _("unknown revision '%s'") % changeid)
534 534
535 535 def __hash__(self):
536 536 try:
537 537 return hash(self._rev)
538 538 except AttributeError:
539 539 return id(self)
540 540
541 541 def __nonzero__(self):
542 542 return self._rev != nullrev
543 543
544 544 __bool__ = __nonzero__
545 545
546 546 @propertycache
547 547 def _changeset(self):
548 548 return self._repo.changelog.changelogrevision(self.rev())
549 549
550 550 @propertycache
551 551 def _manifest(self):
552 552 return self._manifestctx.read()
553 553
554 554 @propertycache
555 555 def _manifestctx(self):
556 556 return self._repo.manifestlog[self._changeset.manifest]
557 557
558 558 @propertycache
559 559 def _manifestdelta(self):
560 560 return self._manifestctx.readdelta()
561 561
562 562 @propertycache
563 563 def _parents(self):
564 564 repo = self._repo
565 565 p1, p2 = repo.changelog.parentrevs(self._rev)
566 566 if p2 == nullrev:
567 567 return [changectx(repo, p1)]
568 568 return [changectx(repo, p1), changectx(repo, p2)]
569 569
570 570 def changeset(self):
571 571 c = self._changeset
572 572 return (
573 573 c.manifest,
574 574 c.user,
575 575 c.date,
576 576 c.files,
577 577 c.description,
578 578 c.extra,
579 579 )
580 580 def manifestnode(self):
581 581 return self._changeset.manifest
582 582
583 583 def user(self):
584 584 return self._changeset.user
585 585 def date(self):
586 586 return self._changeset.date
587 587 def files(self):
588 588 return self._changeset.files
589 589 def description(self):
590 590 return self._changeset.description
591 591 def branch(self):
592 592 return encoding.tolocal(self._changeset.extra.get("branch"))
593 593 def closesbranch(self):
594 594 return 'close' in self._changeset.extra
595 595 def extra(self):
596 596 return self._changeset.extra
597 597 def tags(self):
598 598 return self._repo.nodetags(self._node)
599 599 def bookmarks(self):
600 600 return self._repo.nodebookmarks(self._node)
601 601 def phase(self):
602 602 return self._repo._phasecache.phase(self._repo, self._rev)
603 603 def hidden(self):
604 604 return self._rev in repoview.filterrevs(self._repo, 'visible')
605 605
606 606 def children(self):
607 607 """return contexts for each child changeset"""
608 608 c = self._repo.changelog.children(self._node)
609 609 return [changectx(self._repo, x) for x in c]
610 610
611 611 def ancestors(self):
612 612 for a in self._repo.changelog.ancestors([self._rev]):
613 613 yield changectx(self._repo, a)
614 614
615 615 def descendants(self):
616 616 for d in self._repo.changelog.descendants([self._rev]):
617 617 yield changectx(self._repo, d)
618 618
619 619 def filectx(self, path, fileid=None, filelog=None):
620 620 """get a file context from this changeset"""
621 621 if fileid is None:
622 622 fileid = self.filenode(path)
623 623 return filectx(self._repo, path, fileid=fileid,
624 624 changectx=self, filelog=filelog)
625 625
626 626 def ancestor(self, c2, warn=False):
627 627 """return the "best" ancestor context of self and c2
628 628
629 629 If there are multiple candidates, it will show a message and check
630 630 merge.preferancestor configuration before falling back to the
631 631 revlog ancestor."""
632 632 # deal with workingctxs
633 633 n2 = c2._node
634 634 if n2 is None:
635 635 n2 = c2._parents[0]._node
636 636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
637 637 if not cahs:
638 638 anc = nullid
639 639 elif len(cahs) == 1:
640 640 anc = cahs[0]
641 641 else:
642 642 # experimental config: merge.preferancestor
643 643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
644 644 try:
645 645 ctx = changectx(self._repo, r)
646 646 except error.RepoLookupError:
647 647 continue
648 648 anc = ctx.node()
649 649 if anc in cahs:
650 650 break
651 651 else:
652 652 anc = self._repo.changelog.ancestor(self._node, n2)
653 653 if warn:
654 654 self._repo.ui.status(
655 655 (_("note: using %s as ancestor of %s and %s\n") %
656 656 (short(anc), short(self._node), short(n2))) +
657 657 ''.join(_(" alternatively, use --config "
658 658 "merge.preferancestor=%s\n") %
659 659 short(n) for n in sorted(cahs) if n != anc))
660 660 return changectx(self._repo, anc)
661 661
662 662 def descendant(self, other):
663 663 """True if other is descendant of this changeset"""
664 664 return self._repo.changelog.descendant(self._rev, other._rev)
665 665
666 666 def walk(self, match):
667 667 '''Generates matching file names.'''
668 668
669 669 # Wrap match.bad method to have message with nodeid
670 670 def bad(fn, msg):
671 671 # The manifest doesn't know about subrepos, so don't complain about
672 672 # paths into valid subrepos.
673 673 if any(fn == s or fn.startswith(s + '/')
674 674 for s in self.substate):
675 675 return
676 676 match.bad(fn, _('no such file in rev %s') % self)
677 677
678 678 m = matchmod.badmatch(match, bad)
679 679 return self._manifest.walk(m)
680 680
681 681 def matches(self, match):
682 682 return self.walk(match)
683 683
684 684 class basefilectx(object):
685 685 """A filecontext object represents the common logic for its children:
686 686 filectx: read-only access to a filerevision that is already present
687 687 in the repo,
688 688 workingfilectx: a filecontext that represents files from the working
689 689 directory,
690 690 memfilectx: a filecontext that represents files in-memory."""
691 691 def __new__(cls, repo, path, *args, **kwargs):
692 692 return super(basefilectx, cls).__new__(cls)
693 693
694 694 @propertycache
695 695 def _filelog(self):
696 696 return self._repo.file(self._path)
697 697
698 698 @propertycache
699 699 def _changeid(self):
700 700 if '_changeid' in self.__dict__:
701 701 return self._changeid
702 702 elif '_changectx' in self.__dict__:
703 703 return self._changectx.rev()
704 704 elif '_descendantrev' in self.__dict__:
705 705 # this file context was created from a revision with a known
706 706 # descendant, we can (lazily) correct for linkrev aliases
707 707 return self._adjustlinkrev(self._descendantrev)
708 708 else:
709 709 return self._filelog.linkrev(self._filerev)
710 710
711 711 @propertycache
712 712 def _filenode(self):
713 713 if '_fileid' in self.__dict__:
714 714 return self._filelog.lookup(self._fileid)
715 715 else:
716 716 return self._changectx.filenode(self._path)
717 717
718 718 @propertycache
719 719 def _filerev(self):
720 720 return self._filelog.rev(self._filenode)
721 721
722 722 @propertycache
723 723 def _repopath(self):
724 724 return self._path
725 725
726 726 def __nonzero__(self):
727 727 try:
728 728 self._filenode
729 729 return True
730 730 except error.LookupError:
731 731 # file is missing
732 732 return False
733 733
734 734 __bool__ = __nonzero__
735 735
736 736 def __str__(self):
737 737 try:
738 738 return "%s@%s" % (self.path(), self._changectx)
739 739 except error.LookupError:
740 740 return "%s@???" % self.path()
741 741
742 742 def __repr__(self):
743 743 return "<%s %s>" % (type(self).__name__, str(self))
744 744
745 745 def __hash__(self):
746 746 try:
747 747 return hash((self._path, self._filenode))
748 748 except AttributeError:
749 749 return id(self)
750 750
751 751 def __eq__(self, other):
752 752 try:
753 753 return (type(self) == type(other) and self._path == other._path
754 754 and self._filenode == other._filenode)
755 755 except AttributeError:
756 756 return False
757 757
758 758 def __ne__(self, other):
759 759 return not (self == other)
760 760
761 761 def filerev(self):
762 762 return self._filerev
763 763 def filenode(self):
764 764 return self._filenode
765 765 def flags(self):
766 766 return self._changectx.flags(self._path)
767 767 def filelog(self):
768 768 return self._filelog
769 769 def rev(self):
770 770 return self._changeid
771 771 def linkrev(self):
772 772 return self._filelog.linkrev(self._filerev)
773 773 def node(self):
774 774 return self._changectx.node()
775 775 def hex(self):
776 776 return self._changectx.hex()
777 777 def user(self):
778 778 return self._changectx.user()
779 779 def date(self):
780 780 return self._changectx.date()
781 781 def files(self):
782 782 return self._changectx.files()
783 783 def description(self):
784 784 return self._changectx.description()
785 785 def branch(self):
786 786 return self._changectx.branch()
787 787 def extra(self):
788 788 return self._changectx.extra()
789 789 def phase(self):
790 790 return self._changectx.phase()
791 791 def phasestr(self):
792 792 return self._changectx.phasestr()
793 793 def manifest(self):
794 794 return self._changectx.manifest()
795 795 def changectx(self):
796 796 return self._changectx
797 797 def repo(self):
798 798 return self._repo
799 799
800 800 def path(self):
801 801 return self._path
802 802
803 803 def isbinary(self):
804 804 try:
805 805 return util.binary(self.data())
806 806 except IOError:
807 807 return False
808 808 def isexec(self):
809 809 return 'x' in self.flags()
810 810 def islink(self):
811 811 return 'l' in self.flags()
812 812
813 813 def isabsent(self):
814 814 """whether this filectx represents a file not in self._changectx
815 815
816 816 This is mainly for merge code to detect change/delete conflicts. This is
817 817 expected to be True for all subclasses of basectx."""
818 818 return False
819 819
820 820 _customcmp = False
821 821 def cmp(self, fctx):
822 822 """compare with other file context
823 823
824 824 returns True if different than fctx.
825 825 """
826 826 if fctx._customcmp:
827 827 return fctx.cmp(self)
828 828
829 829 if (fctx._filenode is None
830 830 and (self._repo._encodefilterpats
831 831 # if file data starts with '\1\n', empty metadata block is
832 832 # prepended, which adds 4 bytes to filelog.size().
833 833 or self.size() - 4 == fctx.size())
834 834 or self.size() == fctx.size()):
835 835 return self._filelog.cmp(self._filenode, fctx.data())
836 836
837 837 return True
838 838
839 839 def _adjustlinkrev(self, srcrev, inclusive=False):
840 840 """return the first ancestor of <srcrev> introducing <fnode>
841 841
842 842 If the linkrev of the file revision does not point to an ancestor of
843 843 srcrev, we'll walk down the ancestors until we find one introducing
844 844 this file revision.
845 845
846 846 :srcrev: the changeset revision we search ancestors from
847 847 :inclusive: if true, the src revision will also be checked
848 848 """
849 849 repo = self._repo
850 850 cl = repo.unfiltered().changelog
851 851 mfl = repo.manifestlog
852 852 # fetch the linkrev
853 853 lkr = self.linkrev()
854 854 # hack to reuse ancestor computation when searching for renames
855 855 memberanc = getattr(self, '_ancestrycontext', None)
856 856 iteranc = None
857 857 if srcrev is None:
858 858 # wctx case, used by workingfilectx during mergecopy
859 859 revs = [p.rev() for p in self._repo[None].parents()]
860 860 inclusive = True # we skipped the real (revless) source
861 861 else:
862 862 revs = [srcrev]
863 863 if memberanc is None:
864 864 memberanc = iteranc = cl.ancestors(revs, lkr,
865 865 inclusive=inclusive)
866 866 # check if this linkrev is an ancestor of srcrev
867 867 if lkr not in memberanc:
868 868 if iteranc is None:
869 869 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
870 870 fnode = self._filenode
871 871 path = self._path
872 872 for a in iteranc:
873 873 ac = cl.read(a) # get changeset data (we avoid object creation)
874 874 if path in ac[3]: # checking the 'files' field.
875 875 # The file has been touched, check if the content is
876 876 # similar to the one we search for.
877 877 if fnode == mfl[ac[0]].readfast().get(path):
878 878 return a
879 879 # In theory, we should never get out of that loop without a result.
880 880 # But if the manifest uses a buggy file revision (not a child of the
881 881 # one it replaces) we could. Such a buggy situation will likely
882 882 # result in a crash somewhere else at some point.
883 883 return lkr
884 884
885 885 def introrev(self):
886 886 """return the rev of the changeset which introduced this file revision
887 887
888 888 This method is different from linkrev because it takes into account the
889 889 changeset the filectx was created from. It ensures the returned
890 890 revision is one of its ancestors. This prevents bugs from
891 891 'linkrev-shadowing' when a file revision is used by multiple
892 892 changesets.
893 893 """
894 894 lkr = self.linkrev()
895 895 attrs = vars(self)
896 896 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
897 897 if noctx or self.rev() == lkr:
898 898 return self.linkrev()
899 899 return self._adjustlinkrev(self.rev(), inclusive=True)
900 900
901 901 def _parentfilectx(self, path, fileid, filelog):
902 902 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
903 903 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
904 904 if '_changeid' in vars(self) or '_changectx' in vars(self):
905 905 # If self is associated with a changeset (probably explicitly
906 906 # fed), ensure the created filectx is associated with a
907 907 # changeset that is an ancestor of self.changectx.
908 908 # This lets us later use _adjustlinkrev to get a correct link.
909 909 fctx._descendantrev = self.rev()
910 910 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
911 911 elif '_descendantrev' in vars(self):
912 912 # Otherwise propagate _descendantrev if we have one associated.
913 913 fctx._descendantrev = self._descendantrev
914 914 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
915 915 return fctx
916 916
917 917 def parents(self):
918 918 _path = self._path
919 919 fl = self._filelog
920 920 parents = self._filelog.parents(self._filenode)
921 921 pl = [(_path, node, fl) for node in parents if node != nullid]
922 922
923 923 r = fl.renamed(self._filenode)
924 924 if r:
925 925 # - In the simple rename case, both parents are nullid and pl is empty.
926 926 # - In case of merge, only one of the parents is nullid and should
927 927 # be replaced with the rename information. This parent is -always-
928 928 # the first one.
929 929 #
930 930 # As nullid parents have always been filtered out in the previous list
931 931 # comprehension, inserting at 0 will always result in replacing the
932 932 # first nullid parent with the rename information.
933 933 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
934 934
935 935 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
936 936
937 937 def p1(self):
938 938 return self.parents()[0]
939 939
940 940 def p2(self):
941 941 p = self.parents()
942 942 if len(p) == 2:
943 943 return p[1]
944 944 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
945 945
946 946 def annotate(self, follow=False, linenumber=False, diffopts=None):
947 947 '''returns a list of tuples of ((ctx, number), line) for each line
948 948 in the file, where ctx is the filectx of the node where
949 949 that line was last changed; if linenumber parameter is true, number is
950 950 the line number at the first appearance in the managed file, otherwise,
951 951 number has a fixed value of False.
952 952 '''
953 953
954 954 def lines(text):
955 955 if text.endswith("\n"):
956 956 return text.count("\n")
957 957 return text.count("\n") + int(bool(text))
958 958
959 959 if linenumber:
960 960 def decorate(text, rev):
961 961 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
962 962 else:
963 963 def decorate(text, rev):
964 964 return ([(rev, False)] * lines(text), text)
965 965
966 966 def pair(parent, child):
967 967 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
968 968 for (a1, a2, b1, b2), t in blocks:
969 969 # Changed blocks ('!') or blocks made only of blank lines ('~')
970 970 # belong to the child.
971 971 if t == '=':
972 972 child[0][b1:b2] = parent[0][a1:a2]
973 973 return child
974 974
975 975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
976 976
977 977 def parents(f):
978 978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
979 979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
980 980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
981 981 # isn't an ancestor of the srcrev.
982 982 f._changeid
983 983 pl = f.parents()
984 984
985 985 # Don't return renamed parents if we aren't following.
986 986 if not follow:
987 987 pl = [p for p in pl if p.path() == f.path()]
988 988
989 989 # renamed filectx won't have a filelog yet, so set it
990 990 # from the cache to save time
991 991 for p in pl:
992 992 if not '_filelog' in p.__dict__:
993 993 p._filelog = getlog(p.path())
994 994
995 995 return pl
996 996
997 997 # use linkrev to find the first changeset where self appeared
998 998 base = self
999 999 introrev = self.introrev()
1000 1000 if self.rev() != introrev:
1001 1001 base = self.filectx(self.filenode(), changeid=introrev)
1002 1002 if getattr(base, '_ancestrycontext', None) is None:
1003 1003 cl = self._repo.changelog
1004 1004 if introrev is None:
1005 1005 # wctx is not inclusive, but works because _ancestrycontext
1006 1006 # is used to test filelog revisions
1007 1007 ac = cl.ancestors([p.rev() for p in base.parents()],
1008 1008 inclusive=True)
1009 1009 else:
1010 1010 ac = cl.ancestors([introrev], inclusive=True)
1011 1011 base._ancestrycontext = ac
1012 1012
1013 1013 # This algorithm would prefer to be recursive, but Python is a
1014 1014 # bit recursion-hostile. Instead we do an iterative
1015 1015 # depth-first search.
1016 1016
1017 1017 # 1st DFS pre-calculates pcache and needed
1018 1018 visit = [base]
1019 1019 pcache = {}
1020 1020 needed = {base: 1}
1021 1021 while visit:
1022 1022 f = visit.pop()
1023 1023 if f in pcache:
1024 1024 continue
1025 1025 pl = parents(f)
1026 1026 pcache[f] = pl
1027 1027 for p in pl:
1028 1028 needed[p] = needed.get(p, 0) + 1
1029 1029 if p not in pcache:
1030 1030 visit.append(p)
1031 1031
1032 1032 # 2nd DFS does the actual annotate
1033 1033 visit[:] = [base]
1034 1034 hist = {}
1035 1035 while visit:
1036 1036 f = visit[-1]
1037 1037 if f in hist:
1038 1038 visit.pop()
1039 1039 continue
1040 1040
1041 1041 ready = True
1042 1042 pl = pcache[f]
1043 1043 for p in pl:
1044 1044 if p not in hist:
1045 1045 ready = False
1046 1046 visit.append(p)
1047 1047 if ready:
1048 1048 visit.pop()
1049 1049 curr = decorate(f.data(), f)
1050 1050 for p in pl:
1051 1051 curr = pair(hist[p], curr)
1052 1052 if needed[p] == 1:
1053 1053 del hist[p]
1054 1054 del needed[p]
1055 1055 else:
1056 1056 needed[p] -= 1
1057 1057
1058 1058 hist[f] = curr
1059 1059 del pcache[f]
1060 1060
1061 1061 return zip(hist[base][0], hist[base][1].splitlines(True))
1062 1062
1063 1063 def ancestors(self, followfirst=False):
1064 1064 visit = {}
1065 1065 c = self
1066 1066 if followfirst:
1067 1067 cut = 1
1068 1068 else:
1069 1069 cut = None
1070 1070
1071 1071 while True:
1072 1072 for parent in c.parents()[:cut]:
1073 1073 visit[(parent.linkrev(), parent.filenode())] = parent
1074 1074 if not visit:
1075 1075 break
1076 1076 c = visit.pop(max(visit))
1077 1077 yield c
1078 1078
1079 1079 class filectx(basefilectx):
1080 1080 """A filecontext object makes access to data related to a particular
1081 1081 filerevision convenient."""
1082 1082 def __init__(self, repo, path, changeid=None, fileid=None,
1083 1083 filelog=None, changectx=None):
1084 1084 """changeid can be a changeset revision, node, or tag.
1085 1085 fileid can be a file revision or node."""
1086 1086 self._repo = repo
1087 1087 self._path = path
1088 1088
1089 1089 assert (changeid is not None
1090 1090 or fileid is not None
1091 1091 or changectx is not None), \
1092 1092 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1093 1093 % (changeid, fileid, changectx))
1094 1094
1095 1095 if filelog is not None:
1096 1096 self._filelog = filelog
1097 1097
1098 1098 if changeid is not None:
1099 1099 self._changeid = changeid
1100 1100 if changectx is not None:
1101 1101 self._changectx = changectx
1102 1102 if fileid is not None:
1103 1103 self._fileid = fileid
1104 1104
1105 1105 @propertycache
1106 1106 def _changectx(self):
1107 1107 try:
1108 1108 return changectx(self._repo, self._changeid)
1109 1109 except error.FilteredRepoLookupError:
1110 1110 # Linkrev may point to any revision in the repository. When the
1111 1111 # repository is filtered this may lead to `filectx` trying to build
1112 1112 # `changectx` for a filtered revision. In such a case we fall back to
1113 1113 # creating `changectx` on the unfiltered version of the repository.
1114 1114 # This fallback should not be an issue because `changectx` from
1115 1115 # `filectx` are not used in complex operations that care about
1116 1116 # filtering.
1117 1117 #
1118 1118 # This fallback is a cheap and dirty fix that prevents several
1119 1119 # crashes. It does not ensure the behavior is correct. However the
1120 1120 # behavior was not correct before filtering either and "incorrect
1121 1121 # behavior" is seen as better as "crash"
1122 1122 #
1123 1123 # Linkrevs have several serious troubles with filtering that are
1124 1124 # complicated to solve. Proper handling of the issue here should be
1125 1125 # considered when solving the linkrev issues is on the table.
1126 1126 return changectx(self._repo.unfiltered(), self._changeid)
1127 1127
1128 1128 def filectx(self, fileid, changeid=None):
1129 1129 '''opens an arbitrary revision of the file without
1130 1130 opening a new filelog'''
1131 1131 return filectx(self._repo, self._path, fileid=fileid,
1132 1132 filelog=self._filelog, changeid=changeid)
1133 1133
1134 1134 def rawdata(self):
1135 1135 return self._filelog.revision(self._filenode, raw=True)
1136 1136
1137 1137 def data(self):
1138 1138 try:
1139 1139 return self._filelog.read(self._filenode)
1140 1140 except error.CensoredNodeError:
1141 1141 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1142 1142 return ""
1143 1143 raise error.Abort(_("censored node: %s") % short(self._filenode),
1144 1144 hint=_("set censor.policy to ignore errors"))
1145 1145
1146 1146 def size(self):
1147 1147 return self._filelog.size(self._filerev)
1148 1148
1149 1149 def renamed(self):
1150 1150 """check if file was actually renamed in this changeset revision
1151 1151
1152 1152 If a rename is logged in the file revision, we report the copy for the
1153 1153 changeset only if the file revision's linkrev points back to the changeset
1154 1154 in question or both changeset parents contain different file revisions.
1155 1155 """
1156 1156
1157 1157 renamed = self._filelog.renamed(self._filenode)
1158 1158 if not renamed:
1159 1159 return renamed
1160 1160
1161 1161 if self.rev() == self.linkrev():
1162 1162 return renamed
1163 1163
1164 1164 name = self.path()
1165 1165 fnode = self._filenode
1166 1166 for p in self._changectx.parents():
1167 1167 try:
1168 1168 if fnode == p.filenode(name):
1169 1169 return None
1170 1170 except error.LookupError:
1171 1171 pass
1172 1172 return renamed
1173 1173
1174 1174 def children(self):
1175 1175 # hard for renames
1176 1176 c = self._filelog.children(self._filenode)
1177 1177 return [filectx(self._repo, self._path, fileid=x,
1178 1178 filelog=self._filelog) for x in c]
1179 1179
1180 1180 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1181 1181 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1182 1182 if diff from fctx2 to fctx1 has changes in linerange2 and
1183 1183 `linerange1` is the new line range for fctx1.
1184 1184 """
1185 1185 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1186 1186 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1187 1187 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1188 1188 return diffinrange, linerange1
1189 1189
1190 1190 def blockancestors(fctx, fromline, toline, followfirst=False):
1191 1191 """Yield ancestors of `fctx` with respect to the block of lines within
1192 1192 `fromline`-`toline` range.
1193 1193 """
1194 1194 diffopts = patch.diffopts(fctx._repo.ui)
1195 1195 introrev = fctx.introrev()
1196 1196 if fctx.rev() != introrev:
1197 1197 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1198 1198 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1199 1199 while visit:
1200 1200 c, linerange2 = visit.pop(max(visit))
1201 1201 pl = c.parents()
1202 1202 if followfirst:
1203 1203 pl = pl[:1]
1204 1204 if not pl:
1205 1205 # The block originates from the initial revision.
1206 1206 yield c, linerange2
1207 1207 continue
1208 1208 inrange = False
1209 1209 for p in pl:
1210 1210 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1211 1211 inrange = inrange or inrangep
1212 1212 if linerange1[0] == linerange1[1]:
1213 1213 # Parent's linerange is empty, meaning that the block got
1214 1214 # introduced in this revision; no need to go further in this
1215 1215 # branch.
1216 1216 continue
1217 1217 # Set _descendantrev with 'c' (a known descendant) so that, when
1218 1218 # _adjustlinkrev is called for 'p', it receives this descendant
1219 1219 # (as srcrev) instead of possibly the topmost introrev.
1220 1220 p._descendantrev = c.rev()
1221 1221 visit[p.linkrev(), p.filenode()] = p, linerange1
1222 1222 if inrange:
1223 1223 yield c, linerange2
1224 1224
1225 1225 def blockdescendants(fctx, fromline, toline):
1226 1226 """Yield descendants of `fctx` with respect to the block of lines within
1227 1227 `fromline`-`toline` range.
1228 1228 """
1229 1229 # First possibly yield 'fctx' if it has changes in range with respect to
1230 1230 # its parents.
1231 1231 try:
1232 1232 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1233 1233 except StopIteration:
1234 1234 pass
1235 1235 else:
1236 1236 if c == fctx:
1237 1237 yield c, linerange1
1238 1238
1239 1239 diffopts = patch.diffopts(fctx._repo.ui)
1240 1240 fl = fctx.filelog()
1241 1241 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1242 1242 for i in fl.descendants([fctx.filerev()]):
1243 1243 c = fctx.filectx(i)
1244 1244 inrange = False
1245 1245 for x in fl.parentrevs(i):
1246 1246 try:
1247 1247 p, linerange2 = seen[x]
1248 1248 except KeyError:
1249 1249 # nullrev or other branch
1250 1250 continue
1251 1251 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1252 1252 inrange = inrange or inrangep
1253 1253 # If revision 'i' has been seen (it's a merge), we assume that its
1254 1254 # line range is the same independently of which parent was used
1255 1255 # to compute it.
1256 1256 assert i not in seen or seen[i][1] == linerange1, (
1257 1257 'computed line range for %s is not consistent between '
1258 1258 'ancestor branches' % c)
1259 1259 seen[i] = c, linerange1
1260 1260 if inrange:
1261 1261 yield c, linerange1
1262 1262
1263 1263 class committablectx(basectx):
1264 1264 """A committablectx object provides common functionality for a context that
1265 1265 wants the ability to commit, e.g. workingctx or memctx."""
1266 1266 def __init__(self, repo, text="", user=None, date=None, extra=None,
1267 1267 changes=None):
1268 1268 self._repo = repo
1269 1269 self._rev = None
1270 1270 self._node = None
1271 1271 self._text = text
1272 1272 if date:
1273 1273 self._date = util.parsedate(date)
1274 1274 if user:
1275 1275 self._user = user
1276 1276 if changes:
1277 1277 self._status = changes
1278 1278
1279 1279 self._extra = {}
1280 1280 if extra:
1281 1281 self._extra = extra.copy()
1282 1282 if 'branch' not in self._extra:
1283 1283 try:
1284 1284 branch = encoding.fromlocal(self._repo.dirstate.branch())
1285 1285 except UnicodeDecodeError:
1286 1286 raise error.Abort(_('branch name not in UTF-8!'))
1287 1287 self._extra['branch'] = branch
1288 1288 if self._extra['branch'] == '':
1289 1289 self._extra['branch'] = 'default'
1290 1290
1291 1291 def __str__(self):
1292 1292 return str(self._parents[0]) + "+"
1293 1293
1294 1294 def __nonzero__(self):
1295 1295 return True
1296 1296
1297 1297 __bool__ = __nonzero__
1298 1298
1299 1299 def _buildflagfunc(self):
1300 1300 # Create a fallback function for getting file flags when the
1301 1301 # filesystem doesn't support them
1302 1302
1303 1303 copiesget = self._repo.dirstate.copies().get
1304 1304 parents = self.parents()
1305 1305 if len(parents) < 2:
1306 1306 # when we have one parent, it's easy: copy from parent
1307 1307 man = parents[0].manifest()
1308 1308 def func(f):
1309 1309 f = copiesget(f, f)
1310 1310 return man.flags(f)
1311 1311 else:
1312 1312 # merges are tricky: we try to reconstruct the unstored
1313 1313 # result from the merge (issue1802)
1314 1314 p1, p2 = parents
1315 1315 pa = p1.ancestor(p2)
1316 1316 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1317 1317
1318 1318 def func(f):
1319 1319 f = copiesget(f, f) # may be wrong for merges with copies
1320 1320 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1321 1321 if fl1 == fl2:
1322 1322 return fl1
1323 1323 if fl1 == fla:
1324 1324 return fl2
1325 1325 if fl2 == fla:
1326 1326 return fl1
1327 1327 return '' # punt for conflicts
1328 1328
1329 1329 return func
1330 1330
1331 1331 @propertycache
1332 1332 def _flagfunc(self):
1333 1333 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1334 1334
1335 1335 @propertycache
1336 1336 def _status(self):
1337 1337 return self._repo.status()
1338 1338
1339 1339 @propertycache
1340 1340 def _user(self):
1341 1341 return self._repo.ui.username()
1342 1342
1343 1343 @propertycache
1344 1344 def _date(self):
1345 1345 return util.makedate()
1346 1346
1347 1347 def subrev(self, subpath):
1348 1348 return None
1349 1349
1350 1350 def manifestnode(self):
1351 1351 return None
1352 1352 def user(self):
1353 1353 return self._user or self._repo.ui.username()
1354 1354 def date(self):
1355 1355 return self._date
1356 1356 def description(self):
1357 1357 return self._text
1358 1358 def files(self):
1359 1359 return sorted(self._status.modified + self._status.added +
1360 1360 self._status.removed)
1361 1361
1362 1362 def modified(self):
1363 1363 return self._status.modified
1364 1364 def added(self):
1365 1365 return self._status.added
1366 1366 def removed(self):
1367 1367 return self._status.removed
1368 1368 def deleted(self):
1369 1369 return self._status.deleted
1370 1370 def branch(self):
1371 1371 return encoding.tolocal(self._extra['branch'])
1372 1372 def closesbranch(self):
1373 1373 return 'close' in self._extra
1374 1374 def extra(self):
1375 1375 return self._extra
1376 1376
1377 1377 def tags(self):
1378 1378 return []
1379 1379
1380 1380 def bookmarks(self):
1381 1381 b = []
1382 1382 for p in self.parents():
1383 1383 b.extend(p.bookmarks())
1384 1384 return b
1385 1385
1386 1386 def phase(self):
1387 1387 phase = phases.draft # default phase to draft
1388 1388 for p in self.parents():
1389 1389 phase = max(phase, p.phase())
1390 1390 return phase
1391 1391
1392 1392 def hidden(self):
1393 1393 return False
1394 1394
1395 1395 def children(self):
1396 1396 return []
1397 1397
1398 1398 def flags(self, path):
1399 1399 if '_manifest' in self.__dict__:
1400 1400 try:
1401 1401 return self._manifest.flags(path)
1402 1402 except KeyError:
1403 1403 return ''
1404 1404
1405 1405 try:
1406 1406 return self._flagfunc(path)
1407 1407 except OSError:
1408 1408 return ''
1409 1409
1410 1410 def ancestor(self, c2):
1411 1411 """return the "best" ancestor context of self and c2"""
1412 1412 return self._parents[0].ancestor(c2) # punt on two parents for now
1413 1413
1414 1414 def walk(self, match):
1415 1415 '''Generates matching file names.'''
1416 1416 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1417 1417 True, False))
1418 1418
1419 1419 def matches(self, match):
1420 1420 return sorted(self._repo.dirstate.matches(match))
1421 1421
1422 1422 def ancestors(self):
1423 1423 for p in self._parents:
1424 1424 yield p
1425 1425 for a in self._repo.changelog.ancestors(
1426 1426 [p.rev() for p in self._parents]):
1427 1427 yield changectx(self._repo, a)
1428 1428
1429 1429 def markcommitted(self, node):
1430 1430 """Perform post-commit cleanup necessary after committing this ctx
1431 1431
1432 1432 Specifically, this updates backing stores this working context
1433 1433 wraps to reflect the fact that the changes reflected by this
1434 1434 workingctx have been committed. For example, it marks
1435 1435 modified and added files as normal in the dirstate.
1436 1436
1437 1437 """
1438 1438
1439 1439 self._repo.dirstate.beginparentchange()
1440 1440 for f in self.modified() + self.added():
1441 1441 self._repo.dirstate.normal(f)
1442 1442 for f in self.removed():
1443 1443 self._repo.dirstate.drop(f)
1444 1444 self._repo.dirstate.setparents(node)
1445 1445 self._repo.dirstate.endparentchange()
1446 1446
1447 1447 # write changes out explicitly, because nesting wlock at
1448 1448 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1449 1449 # from immediately doing so for subsequent changing files
1450 1450 self._repo.dirstate.write(self._repo.currenttransaction())
1451 1451
1452 1452 class workingctx(committablectx):
1453 1453 """A workingctx object makes access to data related to
1454 1454 the current working directory convenient.
1455 1455 date - any valid date string or (unixtime, offset), or None.
1456 1456 user - username string, or None.
1457 1457 extra - a dictionary of extra values, or None.
1458 1458 changes - a list of file lists as returned by localrepo.status()
1459 1459 or None to use the repository status.
1460 1460 """
1461 1461 def __init__(self, repo, text="", user=None, date=None, extra=None,
1462 1462 changes=None):
1463 1463 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1464 1464
1465 1465 def __iter__(self):
1466 1466 d = self._repo.dirstate
1467 1467 for f in d:
1468 1468 if d[f] != 'r':
1469 1469 yield f
1470 1470
1471 1471 def __contains__(self, key):
1472 1472 return self._repo.dirstate[key] not in "?r"
1473 1473
1474 1474 def hex(self):
1475 1475 return hex(wdirid)
1476 1476
1477 1477 @propertycache
1478 1478 def _parents(self):
1479 1479 p = self._repo.dirstate.parents()
1480 1480 if p[1] == nullid:
1481 1481 p = p[:-1]
1482 1482 return [changectx(self._repo, x) for x in p]
1483 1483
1484 1484 def filectx(self, path, filelog=None):
1485 1485 """get a file context from the working directory"""
1486 1486 return workingfilectx(self._repo, path, workingctx=self,
1487 1487 filelog=filelog)
1488 1488
1489 1489 def dirty(self, missing=False, merge=True, branch=True):
1490 1490 "check whether a working directory is modified"
1491 1491 # check subrepos first
1492 1492 for s in sorted(self.substate):
1493 1493 if self.sub(s).dirty():
1494 1494 return True
1495 1495 # check current working dir
1496 1496 return ((merge and self.p2()) or
1497 1497 (branch and self.branch() != self.p1().branch()) or
1498 1498 self.modified() or self.added() or self.removed() or
1499 1499 (missing and self.deleted()))
1500 1500
1501 1501 def add(self, list, prefix=""):
1502 1502 join = lambda f: os.path.join(prefix, f)
1503 1503 with self._repo.wlock():
1504 1504 ui, ds = self._repo.ui, self._repo.dirstate
1505 1505 rejected = []
1506 1506 lstat = self._repo.wvfs.lstat
1507 1507 for f in list:
1508 1508 scmutil.checkportable(ui, join(f))
1509 1509 try:
1510 1510 st = lstat(f)
1511 1511 except OSError:
1512 1512 ui.warn(_("%s does not exist!\n") % join(f))
1513 1513 rejected.append(f)
1514 1514 continue
1515 1515 if st.st_size > 10000000:
1516 1516 ui.warn(_("%s: up to %d MB of RAM may be required "
1517 1517 "to manage this file\n"
1518 1518 "(use 'hg revert %s' to cancel the "
1519 1519 "pending addition)\n")
1520 1520 % (f, 3 * st.st_size // 1000000, join(f)))
1521 1521 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1522 1522 ui.warn(_("%s not added: only files and symlinks "
1523 1523 "supported currently\n") % join(f))
1524 1524 rejected.append(f)
1525 1525 elif ds[f] in 'amn':
1526 1526 ui.warn(_("%s already tracked!\n") % join(f))
1527 1527 elif ds[f] == 'r':
1528 1528 ds.normallookup(f)
1529 1529 else:
1530 1530 ds.add(f)
1531 1531 return rejected
1532 1532
1533 1533 def forget(self, files, prefix=""):
1534 1534 join = lambda f: os.path.join(prefix, f)
1535 1535 with self._repo.wlock():
1536 1536 rejected = []
1537 1537 for f in files:
1538 1538 if f not in self._repo.dirstate:
1539 1539 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1540 1540 rejected.append(f)
1541 1541 elif self._repo.dirstate[f] != 'a':
1542 1542 self._repo.dirstate.remove(f)
1543 1543 else:
1544 1544 self._repo.dirstate.drop(f)
1545 1545 return rejected
1546 1546
1547 1547 def undelete(self, list):
1548 1548 pctxs = self.parents()
1549 1549 with self._repo.wlock():
1550 1550 for f in list:
1551 1551 if self._repo.dirstate[f] != 'r':
1552 1552 self._repo.ui.warn(_("%s not removed!\n") % f)
1553 1553 else:
1554 1554 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1555 1555 t = fctx.data()
1556 1556 self._repo.wwrite(f, t, fctx.flags())
1557 1557 self._repo.dirstate.normal(f)
1558 1558
1559 1559 def copy(self, source, dest):
1560 1560 try:
1561 1561 st = self._repo.wvfs.lstat(dest)
1562 1562 except OSError as err:
1563 1563 if err.errno != errno.ENOENT:
1564 1564 raise
1565 1565 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1566 1566 return
1567 1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1568 1568 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1569 1569 "symbolic link\n") % dest)
1570 1570 else:
1571 1571 with self._repo.wlock():
1572 1572 if self._repo.dirstate[dest] in '?':
1573 1573 self._repo.dirstate.add(dest)
1574 1574 elif self._repo.dirstate[dest] in 'r':
1575 1575 self._repo.dirstate.normallookup(dest)
1576 1576 self._repo.dirstate.copy(source, dest)
1577 1577
1578 1578 def match(self, pats=None, include=None, exclude=None, default='glob',
1579 1579 listsubrepos=False, badfn=None):
1580 1580 if pats is None:
1581 1581 pats = []
1582 1582 r = self._repo
1583 1583
1584 1584 # Only a case-insensitive filesystem needs magic to translate user input
1585 1585 # to actual case in the filesystem.
1586 1586 matcherfunc = matchmod.match
1587 1587 if not util.fscasesensitive(r.root):
1588 1588 matcherfunc = matchmod.icasefsmatcher
1589 1589 return matcherfunc(r.root, r.getcwd(), pats,
1590 1590 include, exclude, default,
1591 1591 auditor=r.auditor, ctx=self,
1592 1592 listsubrepos=listsubrepos, badfn=badfn)
1593 1593
1594 1594 def _filtersuspectsymlink(self, files):
1595 1595 if not files or self._repo.dirstate._checklink:
1596 1596 return files
1597 1597
1598 1598 # Symlink placeholders may get non-symlink-like contents
1599 1599 # via user error or dereferencing by NFS or Samba servers,
1600 1600 # so we filter out any placeholders that don't look like a
1601 1601 # symlink
1602 1602 sane = []
1603 1603 for f in files:
1604 1604 if self.flags(f) == 'l':
1605 1605 d = self[f].data()
1606 1606 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1607 1607 self._repo.ui.debug('ignoring suspect symlink placeholder'
1608 1608 ' "%s"\n' % f)
1609 1609 continue
1610 1610 sane.append(f)
1611 1611 return sane
1612 1612
1613 1613 def _checklookup(self, files):
1614 1614 # check for any possibly clean files
1615 1615 if not files:
1616 return [], []
1616 return [], [], []
1617 1617
1618 1618 modified = []
1619 deleted = []
1619 1620 fixup = []
1620 1621 pctx = self._parents[0]
1621 1622 # do a full compare of any files that might have changed
1622 1623 for f in sorted(files):
1624 try:
1625 # This will return True for a file that got replaced by a
1626 # directory in the interim, but fixing that is pretty hard.
1623 1627 if (f not in pctx or self.flags(f) != pctx.flags(f)
1624 1628 or pctx[f].cmp(self[f])):
1625 1629 modified.append(f)
1626 1630 else:
1627 1631 fixup.append(f)
1632 except (IOError, OSError):
1633 # A file became inaccessible in between? Mark it as deleted,
1634 # matching dirstate behavior (issue5584).
1635 # The dirstate has more complex behavior around whether a
1636 # missing file matches a directory, etc, but we don't need to
1637 # bother with that: if f has made it to this point, we're sure
1638 # it's in the dirstate.
1639 deleted.append(f)
1628 1640
1629 1641 # update dirstate for files that are actually clean
1630 1642 if fixup:
1631 1643 try:
1632 1644 # updating the dirstate is optional
1633 1645 # so we don't wait on the lock
1634 1646 # wlock can invalidate the dirstate, so cache normal _after_
1635 1647 # taking the lock
1636 1648 with self._repo.wlock(False):
1637 1649 normal = self._repo.dirstate.normal
1638 1650 for f in fixup:
1639 1651 normal(f)
1640 1652 # write changes out explicitly, because nesting
1641 1653 # wlock at runtime may prevent 'wlock.release()'
1642 1654 # after this block from doing so for subsequent
1643 1655 # changing files
1644 1656 self._repo.dirstate.write(self._repo.currenttransaction())
1645 1657 except error.LockError:
1646 1658 pass
1647 return modified, fixup
1659 return modified, deleted, fixup
1648 1660
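The heart of this change is the new try/except around the content comparison: a file that vanishes between the dirstate walk and the comparison is now reported as deleted instead of letting the IOError/OSError escape from status (issue5584). Below is a self-contained sketch of the same pattern, with hypothetical read_working/read_parent callables standing in for the real filectx comparison:

    def classify(paths, read_working, read_parent):
        # compare possibly-clean files; a file that disappears mid-comparison
        # is treated as deleted rather than raising out of the status walk
        modified, deleted, fixup = [], [], []
        for p in sorted(paths):
            try:
                if read_working(p) != read_parent(p):
                    modified.append(p)
                else:
                    fixup.append(p)    # really clean; dirstate can be fixed up
            except (IOError, OSError):
                deleted.append(p)      # vanished in between, report as deleted
        return modified, deleted, fixup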
1649 1661 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1650 1662 unknown=False):
1651 1663 '''Gets the status from the dirstate -- internal use only.'''
1652 1664 listignored, listclean, listunknown = ignored, clean, unknown
1653 1665 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1654 1666 subrepos = []
1655 1667 if '.hgsub' in self:
1656 1668 subrepos = sorted(self.substate)
1657 1669 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1658 1670 listclean, listunknown)
1659 1671
1660 1672 # check for any possibly clean files
1661 1673 if cmp:
1662 modified2, fixup = self._checklookup(cmp)
1674 modified2, deleted2, fixup = self._checklookup(cmp)
1663 1675 s.modified.extend(modified2)
1676 s.deleted.extend(deleted2)
1664 1677
1665 1678 # update dirstate for files that are actually clean
1666 1679 if fixup and listclean:
1667 1680 s.clean.extend(fixup)
1668 1681
1669 1682 if match.always():
1670 1683 # cache for performance
1671 1684 if s.unknown or s.ignored or s.clean:
1672 1685 # "_status" is cached with list*=False in the normal route
1673 1686 self._status = scmutil.status(s.modified, s.added, s.removed,
1674 1687 s.deleted, [], [], [])
1675 1688 else:
1676 1689 self._status = s
1677 1690
1678 1691 return s
1679 1692
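The modified/deleted/fixup lists returned by _checklookup are folded into the scmutil.status tuple that _dirstatestatus returns, which is ultimately what 'hg status' prints. A minimal sketch of reading that tuple from a repository; the path and the keyword argument reflect typical usage and are assumptions, not part of this diff:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')
    st = repo.status(clean=True)       # working directory vs. its first parent
    for field in ('modified', 'added', 'removed', 'deleted',
                  'unknown', 'ignored', 'clean'):
        print('%s: %r' % (field, getattr(st, field)))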
1680 1693 @propertycache
1681 1694 def _manifest(self):
1682 1695 """generate a manifest corresponding to the values in self._status
1683 1696
1684 1697 This reuses the file nodeid from parent, but we use special node
1685 1698 identifiers for added and modified files. This is used by manifests
1686 1699 merge to see that files are different and by update logic to avoid
1687 1700 deleting newly added files.
1688 1701 """
1689 1702 return self._buildstatusmanifest(self._status)
1690 1703
1691 1704 def _buildstatusmanifest(self, status):
1692 1705 """Builds a manifest that includes the given status results."""
1693 1706 parents = self.parents()
1694 1707
1695 1708 man = parents[0].manifest().copy()
1696 1709
1697 1710 ff = self._flagfunc
1698 1711 for i, l in ((addednodeid, status.added),
1699 1712 (modifiednodeid, status.modified)):
1700 1713 for f in l:
1701 1714 man[f] = i
1702 1715 try:
1703 1716 man.setflag(f, ff(f))
1704 1717 except OSError:
1705 1718 pass
1706 1719
1707 1720 for f in status.deleted + status.removed:
1708 1721 if f in man:
1709 1722 del man[f]
1710 1723
1711 1724 return man
1712 1725
1713 1726 def _buildstatus(self, other, s, match, listignored, listclean,
1714 1727 listunknown):
1715 1728 """build a status with respect to another context
1716 1729
1717 1730 This includes logic for maintaining the fast path of status when
1718 1731 comparing the working directory against its parent, which is to skip
1719 1732 building a new manifest if self (working directory) is not comparing
1720 1733 against its parent (repo['.']).
1721 1734 """
1722 1735 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1723 1736 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1724 1737 # might have accidentally ended up with the entire contents of the file
1725 1738 # they are supposed to be linking to.
1726 1739 s.modified[:] = self._filtersuspectsymlink(s.modified)
1727 1740 if other != self._repo['.']:
1728 1741 s = super(workingctx, self)._buildstatus(other, s, match,
1729 1742 listignored, listclean,
1730 1743 listunknown)
1731 1744 return s
1732 1745
1733 1746 def _matchstatus(self, other, match):
1734 1747 """override the match method with a filter for directory patterns
1735 1748
1736 1749 We use inheritance to customize the match.bad method only in cases of
1737 1750 workingctx since it belongs only to the working directory when
1738 1751 comparing against the parent changeset.
1739 1752
1740 1753 If we aren't comparing against the working directory's parent, then we
1741 1754 just use the default match object sent to us.
1742 1755 """
1743 1756 superself = super(workingctx, self)
1744 1757 match = superself._matchstatus(other, match)
1745 1758 if other != self._repo['.']:
1746 1759 def bad(f, msg):
1747 1760 # 'f' may be a directory pattern from 'match.files()',
1748 1761 # so 'f not in ctx1' is not enough
1749 1762 if f not in other and not other.hasdir(f):
1750 1763 self._repo.ui.warn('%s: %s\n' %
1751 1764 (self._repo.dirstate.pathto(f), msg))
1752 1765 match.bad = bad
1753 1766 return match
1754 1767
1755 1768 class committablefilectx(basefilectx):
1756 1769 """A committablefilectx provides common functionality for a file context
1757 1770 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1758 1771 def __init__(self, repo, path, filelog=None, ctx=None):
1759 1772 self._repo = repo
1760 1773 self._path = path
1761 1774 self._changeid = None
1762 1775 self._filerev = self._filenode = None
1763 1776
1764 1777 if filelog is not None:
1765 1778 self._filelog = filelog
1766 1779 if ctx:
1767 1780 self._changectx = ctx
1768 1781
1769 1782 def __nonzero__(self):
1770 1783 return True
1771 1784
1772 1785 __bool__ = __nonzero__
1773 1786
1774 1787 def linkrev(self):
1775 1788 # linked to self._changectx no matter if file is modified or not
1776 1789 return self.rev()
1777 1790
1778 1791 def parents(self):
1779 1792 '''return parent filectxs, following copies if necessary'''
1780 1793 def filenode(ctx, path):
1781 1794 return ctx._manifest.get(path, nullid)
1782 1795
1783 1796 path = self._path
1784 1797 fl = self._filelog
1785 1798 pcl = self._changectx._parents
1786 1799 renamed = self.renamed()
1787 1800
1788 1801 if renamed:
1789 1802 pl = [renamed + (None,)]
1790 1803 else:
1791 1804 pl = [(path, filenode(pcl[0], path), fl)]
1792 1805
1793 1806 for pc in pcl[1:]:
1794 1807 pl.append((path, filenode(pc, path), fl))
1795 1808
1796 1809 return [self._parentfilectx(p, fileid=n, filelog=l)
1797 1810 for p, n, l in pl if n != nullid]
1798 1811
1799 1812 def children(self):
1800 1813 return []
1801 1814
1802 1815 class workingfilectx(committablefilectx):
1803 1816 """A workingfilectx object makes access to data related to a particular
1804 1817 file in the working directory convenient."""
1805 1818 def __init__(self, repo, path, filelog=None, workingctx=None):
1806 1819 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1807 1820
1808 1821 @propertycache
1809 1822 def _changectx(self):
1810 1823 return workingctx(self._repo)
1811 1824
1812 1825 def data(self):
1813 1826 return self._repo.wread(self._path)
1814 1827 def renamed(self):
1815 1828 rp = self._repo.dirstate.copied(self._path)
1816 1829 if not rp:
1817 1830 return None
1818 1831 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1819 1832
1820 1833 def size(self):
1821 1834 return self._repo.wvfs.lstat(self._path).st_size
1822 1835 def date(self):
1823 1836 t, tz = self._changectx.date()
1824 1837 try:
1825 1838 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1826 1839 except OSError as err:
1827 1840 if err.errno != errno.ENOENT:
1828 1841 raise
1829 1842 return (t, tz)
1830 1843
1831 1844 def cmp(self, fctx):
1832 1845 """compare with other file context
1833 1846
1834 1847 returns True if different than fctx.
1835 1848 """
1836 1849 # fctx should be a filectx (not a workingfilectx)
1837 1850 # invert comparison to reuse the same code path
1838 1851 return fctx.cmp(self)
1839 1852
1840 1853 def remove(self, ignoremissing=False):
1841 1854 """wraps unlink for a repo's working directory"""
1842 1855 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1843 1856
1844 1857 def write(self, data, flags):
1845 1858 """wraps repo.wwrite"""
1846 1859 self._repo.wwrite(self._path, data, flags)
1847 1860
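A small sketch of the conveniences workingfilectx provides, assuming a hypothetical repository path and tracked file name; data() and write() are the thin wrappers around repo.wread()/repo.wwrite() defined above:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')
    fctx = repo[None]['README']        # workingfilectx for a tracked file
    text = fctx.data()                 # read from the working directory
    fctx.write(text + '\nappended\n', fctx.flags())   # rewrite it in place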
1848 1861 class workingcommitctx(workingctx):
1849 1862 """A workingcommitctx object makes access to data related to
1850 1863 the revision being committed convenient.
1851 1864
1852 1865 This hides changes in the working directory, if they aren't
1853 1866 committed in this context.
1854 1867 """
1855 1868 def __init__(self, repo, changes,
1856 1869 text="", user=None, date=None, extra=None):
1857 1870 super(workingctx, self).__init__(repo, text, user, date, extra,
1858 1871 changes)
1859 1872
1860 1873 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1861 1874 unknown=False):
1862 1875 """Return matched files only in ``self._status``
1863 1876
1864 1877 Uncommitted files appear "clean" via this context, even if
1865 1878 they aren't actually so in the working directory.
1866 1879 """
1867 1880 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1868 1881 if clean:
1869 1882 clean = [f for f in self._manifest if f not in self._changedset]
1870 1883 else:
1871 1884 clean = []
1872 1885 return scmutil.status([f for f in self._status.modified if match(f)],
1873 1886 [f for f in self._status.added if match(f)],
1874 1887 [f for f in self._status.removed if match(f)],
1875 1888 [], [], [], clean)
1876 1889
1877 1890 @propertycache
1878 1891 def _changedset(self):
1879 1892 """Return the set of files changed in this context
1880 1893 """
1881 1894 changed = set(self._status.modified)
1882 1895 changed.update(self._status.added)
1883 1896 changed.update(self._status.removed)
1884 1897 return changed
1885 1898
1886 1899 def makecachingfilectxfn(func):
1887 1900 """Create a filectxfn that caches based on the path.
1888 1901
1889 1902 We can't use util.cachefunc because it uses all arguments as the cache
1890 1903 key and this creates a cycle since the arguments include the repo and
1891 1904 memctx.
1892 1905 """
1893 1906 cache = {}
1894 1907
1895 1908 def getfilectx(repo, memctx, path):
1896 1909 if path not in cache:
1897 1910 cache[path] = func(repo, memctx, path)
1898 1911 return cache[path]
1899 1912
1900 1913 return getfilectx
1901 1914
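A short sketch of wrapping a user-supplied filectxfn with the caching helper above, so an expensive per-path computation runs at most once per memctx; expensive_lookup is a hypothetical stand-in, included only to keep the sketch self-contained:

    from mercurial import context

    def expensive_lookup(path):
        # hypothetical stand-in for a slow per-file computation (e.g. a
        # conversion step)
        return 'contents of %s\n' % path

    def myfilectxfn(repo, memctx, path):
        return context.memfilectx(repo, path, expensive_lookup(path),
                                  memctx=memctx)

    # each path is now computed at most once per memctx
    cachingfn = context.makecachingfilectxfn(myfilectxfn)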
1902 1915 class memctx(committablectx):
1903 1916 """Use memctx to perform in-memory commits via localrepo.commitctx().
1904 1917
1905 1918 Revision information is supplied at initialization time while
1906 1919 related files data is made available through a callback
1907 1920 mechanism. 'repo' is the current localrepo, 'parents' is a
1908 1921 sequence of two parent revisions identifiers (pass None for every
1909 1922 missing parent), 'text' is the commit message and 'files' lists
1910 1923 names of files touched by the revision (normalized and relative to
1911 1924 repository root).
1912 1925
1913 1926 filectxfn(repo, memctx, path) is a callable receiving the
1914 1927 repository, the current memctx object and the normalized path of
1915 1928 requested file, relative to repository root. It is fired by the
1916 1929 commit function for every file in 'files', but calls order is
1917 1930 undefined. If the file is available in the revision being
1918 1931 committed (updated or added), filectxfn returns a memfilectx
1919 1932 object. If the file was removed, filectxfn returns None for recent
1920 1933 Mercurial. Moved files are represented by marking the source file
1921 1934 removed and the new file added with copy information (see
1922 1935 memfilectx).
1923 1936
1924 1937 user receives the committer name and defaults to current
1925 1938 repository username, date is the commit date in any format
1926 1939 supported by util.parsedate() and defaults to current date, extra
1927 1940 is a dictionary of metadata or is left empty.
1928 1941 """
1929 1942
1930 1943 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1931 1944 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1932 1945 # this field to determine what to do in filectxfn.
1933 1946 _returnnoneformissingfiles = True
1934 1947
1935 1948 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1936 1949 date=None, extra=None, editor=False):
1937 1950 super(memctx, self).__init__(repo, text, user, date, extra)
1938 1951 self._rev = None
1939 1952 self._node = None
1940 1953 parents = [(p or nullid) for p in parents]
1941 1954 p1, p2 = parents
1942 1955 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1943 1956 files = sorted(set(files))
1944 1957 self._files = files
1945 1958 self.substate = {}
1946 1959
1947 1960 # if store is not callable, wrap it in a function
1948 1961 if not callable(filectxfn):
1949 1962 def getfilectx(repo, memctx, path):
1950 1963 fctx = filectxfn[path]
1951 1964 # this is weird but apparently we only keep track of one parent
1952 1965 # (why not only store that instead of a tuple?)
1953 1966 copied = fctx.renamed()
1954 1967 if copied:
1955 1968 copied = copied[0]
1956 1969 return memfilectx(repo, path, fctx.data(),
1957 1970 islink=fctx.islink(), isexec=fctx.isexec(),
1958 1971 copied=copied, memctx=memctx)
1959 1972 self._filectxfn = getfilectx
1960 1973 else:
1961 1974 # memoizing increases performance for e.g. vcs convert scenarios.
1962 1975 self._filectxfn = makecachingfilectxfn(filectxfn)
1963 1976
1964 1977 if extra:
1965 1978 self._extra = extra.copy()
1966 1979 else:
1967 1980 self._extra = {}
1968 1981
1969 1982 if self._extra.get('branch', '') == '':
1970 1983 self._extra['branch'] = 'default'
1971 1984
1972 1985 if editor:
1973 1986 self._text = editor(self._repo, self, [])
1974 1987 self._repo.savecommitmessage(self._text)
1975 1988
1976 1989 def filectx(self, path, filelog=None):
1977 1990 """get a file context from the working directory
1978 1991
1979 1992 Returns None if file doesn't exist and should be removed."""
1980 1993 return self._filectxfn(self._repo, self, path)
1981 1994
1982 1995 def commit(self):
1983 1996 """commit context to the repo"""
1984 1997 return self._repo.commitctx(self)
1985 1998
1986 1999 @propertycache
1987 2000 def _manifest(self):
1988 2001 """generate a manifest based on the return values of filectxfn"""
1989 2002
1990 2003 # keep this simple for now; just worry about p1
1991 2004 pctx = self._parents[0]
1992 2005 man = pctx.manifest().copy()
1993 2006
1994 2007 for f in self._status.modified:
1995 2008 p1node = nullid
1996 2009 p2node = nullid
1997 2010 p = pctx[f].parents() # if file isn't in pctx, check p2?
1998 2011 if len(p) > 0:
1999 2012 p1node = p[0].filenode()
2000 2013 if len(p) > 1:
2001 2014 p2node = p[1].filenode()
2002 2015 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2003 2016
2004 2017 for f in self._status.added:
2005 2018 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2006 2019
2007 2020 for f in self._status.removed:
2008 2021 if f in man:
2009 2022 del man[f]
2010 2023
2011 2024 return man
2012 2025
2013 2026 @propertycache
2014 2027 def _status(self):
2015 2028 """Calculate exact status from ``files`` specified at construction
2016 2029 """
2017 2030 man1 = self.p1().manifest()
2018 2031 p2 = self._parents[1]
2019 2032 # "1 < len(self._parents)" can't be used for checking
2020 2033 # existence of the 2nd parent, because "memctx._parents" is
2021 2034 # explicitly initialized by the list, of which length is 2.
2022 2035 if p2.node() != nullid:
2023 2036 man2 = p2.manifest()
2024 2037 managing = lambda f: f in man1 or f in man2
2025 2038 else:
2026 2039 managing = lambda f: f in man1
2027 2040
2028 2041 modified, added, removed = [], [], []
2029 2042 for f in self._files:
2030 2043 if not managing(f):
2031 2044 added.append(f)
2032 2045 elif self[f]:
2033 2046 modified.append(f)
2034 2047 else:
2035 2048 removed.append(f)
2036 2049
2037 2050 return scmutil.status(modified, added, removed, [], [], [], [])
2038 2051
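Following the memctx docstring above, here is a minimal sketch of an in-memory commit that adds a single file; the repository path, file name and user are illustrative assumptions:

    from mercurial import context, hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')

    def filectxfn(repo, memctx, path):
        if path == 'hello.txt':
            return context.memfilectx(repo, path, 'hello\n', memctx=memctx)
        return None                    # anything else is treated as removed

    mctx = context.memctx(repo, (repo['.'].node(), None),
                          'add hello.txt in memory',
                          ['hello.txt'], filectxfn,
                          user='someone@example.com')
    newnode = mctx.commit()            # same as repo.commitctx(mctx)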
2039 2052 class memfilectx(committablefilectx):
2040 2053 """memfilectx represents an in-memory file to commit.
2041 2054
2042 2055 See memctx and committablefilectx for more details.
2043 2056 """
2044 2057 def __init__(self, repo, path, data, islink=False,
2045 2058 isexec=False, copied=None, memctx=None):
2046 2059 """
2047 2060 path is the normalized file path relative to repository root.
2048 2061 data is the file content as a string.
2049 2062 islink is True if the file is a symbolic link.
2050 2063 isexec is True if the file is executable.
2051 2064 copied is the source file path if current file was copied in the
2052 2065 revision being committed, or None."""
2053 2066 super(memfilectx, self).__init__(repo, path, None, memctx)
2054 2067 self._data = data
2055 2068 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2056 2069 self._copied = None
2057 2070 if copied:
2058 2071 self._copied = (copied, nullid)
2059 2072
2060 2073 def data(self):
2061 2074 return self._data
2062 2075 def size(self):
2063 2076 return len(self.data())
2064 2077 def flags(self):
2065 2078 return self._flags
2066 2079 def renamed(self):
2067 2080 return self._copied
2068 2081
2069 2082 def remove(self, ignoremissing=False):
2070 2083 """wraps unlink for a repo's working directory"""
2071 2084 # need to figure out what to do here
2072 2085 del self._changectx[self._path]
2073 2086
2074 2087 def write(self, data, flags):
2075 2088 """wraps repo.wwrite"""
2076 2089 self._data = data
2077 2090
2078 2091 class metadataonlyctx(committablectx):
2079 2092 """Like memctx but it's reusing the manifest of different commit.
2080 2093 Intended to be used by lightweight operations that are creating
2081 2094 metadata-only changes.
2082 2095
2083 2096 Revision information is supplied at initialization time. 'repo' is the
2084 2097 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2085 2098 'parents' is a sequence of two parent revisions identifiers (pass None for
2086 2099 every missing parent), 'text' is the commit message.
2087 2100
2088 2101 user receives the committer name and defaults to current repository
2089 2102 username, date is the commit date in any format supported by
2090 2103 util.parsedate() and defaults to current date, extra is a dictionary of
2091 2104 metadata or is left empty.
2092 2105 """
2093 2106 def __new__(cls, repo, originalctx, *args, **kwargs):
2094 2107 return super(metadataonlyctx, cls).__new__(cls, repo)
2095 2108
2096 2109 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2097 2110 extra=None, editor=False):
2098 2111 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2099 2112 self._rev = None
2100 2113 self._node = None
2101 2114 self._originalctx = originalctx
2102 2115 self._manifestnode = originalctx.manifestnode()
2103 2116 parents = [(p or nullid) for p in parents]
2104 2117 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2105 2118
2106 2119 # sanity check to ensure that the reused manifest parents are
2107 2120 # manifests of our commit parents
2108 2121 mp1, mp2 = self.manifestctx().parents
2109 2122 if p1 != nullid and p1.manifestnode() != mp1:
2110 2123 raise RuntimeError('can\'t reuse the manifest: '
2111 2124 'its p1 doesn\'t match the new ctx p1')
2112 2125 if p2 != nullid and p2.manifestnode() != mp2:
2113 2126 raise RuntimeError('can\'t reuse the manifest: '
2114 2127 'its p2 doesn\'t match the new ctx p2')
2115 2128
2116 2129 self._files = originalctx.files()
2117 2130 self.substate = {}
2118 2131
2119 2132 if extra:
2120 2133 self._extra = extra.copy()
2121 2134 else:
2122 2135 self._extra = {}
2123 2136
2124 2137 if self._extra.get('branch', '') == '':
2125 2138 self._extra['branch'] = 'default'
2126 2139
2127 2140 if editor:
2128 2141 self._text = editor(self._repo, self, [])
2129 2142 self._repo.savecommitmessage(self._text)
2130 2143
2131 2144 def manifestnode(self):
2132 2145 return self._manifestnode
2133 2146
2134 2147 @propertycache
2135 2148 def _manifestctx(self):
2136 2149 return self._repo.manifestlog[self._manifestnode]
2137 2150
2138 2151 def filectx(self, path, filelog=None):
2139 2152 return self._originalctx.filectx(path, filelog=filelog)
2140 2153
2141 2154 def commit(self):
2142 2155 """commit context to the repo"""
2143 2156 return self._repo.commitctx(self)
2144 2157
2145 2158 @property
2146 2159 def _manifest(self):
2147 2160 return self._originalctx.manifest()
2148 2161
2149 2162 @propertycache
2150 2163 def _status(self):
2151 2164 """Calculate exact status from ``files`` specified in the ``origctx``
2152 2165 and parents manifests.
2153 2166 """
2154 2167 man1 = self.p1().manifest()
2155 2168 p2 = self._parents[1]
2156 2169 # "1 < len(self._parents)" can't be used for checking
2157 2170 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2158 2171 # explicitly initialized by the list, of which length is 2.
2159 2172 if p2.node() != nullid:
2160 2173 man2 = p2.manifest()
2161 2174 managing = lambda f: f in man1 or f in man2
2162 2175 else:
2163 2176 managing = lambda f: f in man1
2164 2177
2165 2178 modified, added, removed = [], [], []
2166 2179 for f in self._files:
2167 2180 if not managing(f):
2168 2181 added.append(f)
2169 2182 elif self[f]:
2170 2183 modified.append(f)
2171 2184 else:
2172 2185 removed.append(f)
2173 2186
2174 2187 return scmutil.status(modified, added, removed, [], [], [], [])
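Finally, a hedged sketch of what metadataonlyctx is for: writing a changeset that reuses an existing manifest while changing only metadata such as the commit message. The repository path is an assumption, and replacing or obsoleting the original changeset afterwards is a separate step not shown here:

    from mercurial import context, hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')
    orig = repo['.']                   # changeset whose manifest is reused
    mdctx = context.metadataonlyctx(repo, orig,
                                     parents=(orig.p1().node(), orig.p2().node()),
                                     text='reworded commit message',
                                     user=orig.user(), date=orig.date(),
                                     extra=orig.extra())
    newnode = mdctx.commit()           # the original changeset is left in place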
@@ -1,33 +1,93
1 $ hg init
1 $ hg init repo
2 $ cd repo
2 3 $ echo a > a
3 4 $ hg add a
4 5 $ hg commit -m test
5 6
6 7 Do we ever miss a sub-second change?:
7 8
8 9 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
9 10 > hg co -qC 0
10 11 > echo b > a
11 12 > hg st
12 13 > done
13 14 M a
14 15 M a
15 16 M a
16 17 M a
17 18 M a
18 19 M a
19 20 M a
20 21 M a
21 22 M a
22 23 M a
23 24 M a
24 25 M a
25 26 M a
26 27 M a
27 28 M a
28 29 M a
29 30 M a
30 31 M a
31 32 M a
32 33 M a
33 34
35 $ echo test > b
36 $ mkdir dir1
37 $ echo test > dir1/c
38 $ echo test > d
39
40 $ echo test > e
41 #if execbit
42 A directory will typically have the execute bit -- make sure it doesn't get
43 confused with a file with the exec bit set
44 $ chmod +x e
45 #endif
46
47 $ hg add b dir1 d e
48 adding dir1/c
49 $ hg commit -m test2
50
51 $ cat >> $TESTTMP/dirstaterace.py << EOF
52 > from mercurial import (
53 > context,
54 > extensions,
55 > )
56 > def extsetup():
57 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
58 > def overridechecklookup(orig, self, files):
59 > # make an update that changes the dirstate from underneath
60 > self._repo.ui.system(self._repo.ui.config('dirstaterace', 'command'), cwd=self._repo.root)
61 > return orig(self, files)
62 > EOF
63
64 $ hg debugrebuilddirstate
65 $ hg debugdirstate
66 n 0 -1 unset a
67 n 0 -1 unset b
68 n 0 -1 unset d
69 n 0 -1 unset dir1/c
70 n 0 -1 unset e
71
72 XXX Note that this returns M for files that got replaced by directories. This is
73 definitely a bug, but the fix for that is hard and the next status run is fine
74 anyway.
75
76 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py \
77 > --config dirstaterace.command='rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e'
78 M d
79 M e
80 ! b
81 ! dir1/c
82 $ hg debugdirstate
83 n 644 2 * a (glob)
84 n 0 -1 unset b
85 n 0 -1 unset d
86 n 0 -1 unset dir1/c
87 n 0 -1 unset e
88
89 $ hg status
90 ! b
91 ! d
92 ! dir1/c
93 ! e