##// END OF EJS Templates
context: spell out the logic around linkrev adjustment starting point...
Boris Feld -
r40728:f3f4d853 default
parent child Browse files
Show More
@@ -1,2435 +1,2442 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirid,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58
59 59 def __init__(self, repo):
60 60 self._repo = repo
61 61
62 62 def __bytes__(self):
63 63 return short(self.node())
64 64
65 65 __str__ = encoding.strmethod(__bytes__)
66 66
67 67 def __repr__(self):
68 68 return r"<%s %s>" % (type(self).__name__, str(self))
69 69
70 70 def __eq__(self, other):
71 71 try:
72 72 return type(self) == type(other) and self._rev == other._rev
73 73 except AttributeError:
74 74 return False
75 75
76 76 def __ne__(self, other):
77 77 return not (self == other)
78 78
79 79 def __contains__(self, key):
80 80 return key in self._manifest
81 81
82 82 def __getitem__(self, key):
83 83 return self.filectx(key)
84 84
85 85 def __iter__(self):
86 86 return iter(self._manifest)
87 87
88 88 def _buildstatusmanifest(self, status):
89 89 """Builds a manifest that includes the given status results, if this is
90 90 a working copy context. For non-working copy contexts, it just returns
91 91 the normal manifest."""
92 92 return self.manifest()
93 93
94 94 def _matchstatus(self, other, match):
95 95 """This internal method provides a way for child objects to override the
96 96 match operator.
97 97 """
98 98 return match
99 99
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        ``s`` is a pre-computed ``scmutil.status`` whose deleted, unknown
        and ignored fields are carried over; modified/added/removed/clean
        are derived from a manifest diff against ``other``. Returns a new
        ``scmutil.status`` tuple.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # deleted files are reported via ``deleted``, not as changes
                continue
            if value is None:
                # a None diff value means the file is unchanged
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # working-directory nodeid: fall back to content comparison
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
157 157
158 158 @propertycache
159 159 def substate(self):
160 160 return subrepoutil.state(self, self._repo.ui)
161 161
162 162 def subrev(self, subpath):
163 163 return self.substate[subpath][1]
164 164
165 165 def rev(self):
166 166 return self._rev
167 167 def node(self):
168 168 return self._node
169 169 def hex(self):
170 170 return hex(self.node())
171 171 def manifest(self):
172 172 return self._manifest
173 173 def manifestctx(self):
174 174 return self._manifestctx
175 175 def repo(self):
176 176 return self._repo
177 177 def phasestr(self):
178 178 return phases.phasenames[self.phase()]
179 179 def mutable(self):
180 180 return self.phase() > phases.public
181 181
182 182 def matchfileset(self, expr, badfn=None):
183 183 return fileset.match(self, expr, badfn=badfn)
184 184
185 185 def obsolete(self):
186 186 """True if the changeset is obsolete"""
187 187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 188
189 189 def extinct(self):
190 190 """True if the changeset is extinct"""
191 191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 192
193 193 def orphan(self):
194 194 """True if the changeset is not obsolete, but its ancestor is"""
195 195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 196
197 197 def phasedivergent(self):
198 198 """True if the changeset tries to be a successor of a public changeset
199 199
200 200 Only non-public and non-obsolete changesets may be phase-divergent.
201 201 """
202 202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 203
204 204 def contentdivergent(self):
205 205 """Is a successor of a changeset with multiple possible successor sets
206 206
207 207 Only non-public and non-obsolete changesets may be content-divergent.
208 208 """
209 209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 210
211 211 def isunstable(self):
212 212 """True if the changeset is either orphan, phase-divergent or
213 213 content-divergent"""
214 214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 215
216 216 def instabilities(self):
217 217 """return the list of instabilities affecting this changeset.
218 218
219 219 Instabilities are returned as strings. possible values are:
220 220 - orphan,
221 221 - phase-divergent,
222 222 - content-divergent.
223 223 """
224 224 instabilities = []
225 225 if self.orphan():
226 226 instabilities.append('orphan')
227 227 if self.phasedivergent():
228 228 instabilities.append('phase-divergent')
229 229 if self.contentdivergent():
230 230 instabilities.append('content-divergent')
231 231 return instabilities
232 232
233 233 def parents(self):
234 234 """return contexts for each parent changeset"""
235 235 return self._parents
236 236
237 237 def p1(self):
238 238 return self._parents[0]
239 239
240 240 def p2(self):
241 241 parents = self._parents
242 242 if len(parents) == 2:
243 243 return parents[1]
244 244 return self._repo[nullrev]
245 245
    def _fileinfo(self, path):
        """Return ``(filenode, flags)`` for ``path`` in this changeset.

        Raises ``error.ManifestLookupError`` when the path is not present.
        """
        # fast path: a fully materialized manifest is already cached
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # a cached manifest delta may already answer the question when the
        # file was touched by this changeset
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        # fall back to a targeted find() on the manifest log, which avoids
        # materializing the full manifest
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag
265 265
266 266 def filenode(self, path):
267 267 return self._fileinfo(path)[0]
268 268
269 269 def flags(self, path):
270 270 try:
271 271 return self._fileinfo(path)[1]
272 272 except error.LookupError:
273 273 return ''
274 274
275 275 def sub(self, path, allowcreate=True):
276 276 '''return a subrepo for the stored revision of path, never wdir()'''
277 277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278 278
279 279 def nullsub(self, path, pctx):
280 280 return subrepo.nullsubrepo(self, path, pctx)
281 281
282 282 def workingsub(self, path):
283 283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 284 context.
285 285 '''
286 286 return subrepo.subrepo(self, path, allowwdir=True)
287 287
288 288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 289 listsubrepos=False, badfn=None):
290 290 r = self._repo
291 291 return matchmod.match(r.root, r.getcwd(), pats,
292 292 include, exclude, default,
293 293 auditor=r.nofsauditor, ctx=self,
294 294 listsubrepos=listsubrepos, badfn=badfn)
295 295
296 296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 297 losedatafn=None, prefix='', relroot='', copy=None,
298 298 hunksfilterfn=None):
299 299 """Returns a diff generator for the given contexts and matcher"""
300 300 if ctx2 is None:
301 301 ctx2 = self.p1()
302 302 if ctx2 is not None:
303 303 ctx2 = self._repo[ctx2]
304 304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 305 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 306 relroot=relroot, copy=copy,
307 307 hunksfilterfn=hunksfilterfn)
308 308
309 309 def dirs(self):
310 310 return self._manifest.dirs()
311 311
312 312 def hasdir(self, dir):
313 313 return self._manifest.hasdir(dir)
314 314
    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        # narrow the comparison to the narrowspec, then let the (possibly
        # overridden) _matchstatus hook adjust the matcher
        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # fold the subrepo results into ours, prefixed by subpath
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        # sort each category for deterministic output
        for l in r:
            l.sort()

        return r
378 378
379 379 class changectx(basectx):
380 380 """A changecontext object makes access to data related to a particular
381 381 changeset convenient. It represents a read-only context already present in
382 382 the repo."""
383 383 def __init__(self, repo, rev, node):
384 384 super(changectx, self).__init__(repo)
385 385 self._rev = rev
386 386 self._node = node
387 387
388 388 def __hash__(self):
389 389 try:
390 390 return hash(self._rev)
391 391 except AttributeError:
392 392 return id(self)
393 393
394 394 def __nonzero__(self):
395 395 return self._rev != nullrev
396 396
397 397 __bool__ = __nonzero__
398 398
399 399 @propertycache
400 400 def _changeset(self):
401 401 return self._repo.changelog.changelogrevision(self.rev())
402 402
403 403 @propertycache
404 404 def _manifest(self):
405 405 return self._manifestctx.read()
406 406
407 407 @property
408 408 def _manifestctx(self):
409 409 return self._repo.manifestlog[self._changeset.manifest]
410 410
411 411 @propertycache
412 412 def _manifestdelta(self):
413 413 return self._manifestctx.readdelta()
414 414
415 415 @propertycache
416 416 def _parents(self):
417 417 repo = self._repo
418 418 p1, p2 = repo.changelog.parentrevs(self._rev)
419 419 if p2 == nullrev:
420 420 return [repo[p1]]
421 421 return [repo[p1], repo[p2]]
422 422
423 423 def changeset(self):
424 424 c = self._changeset
425 425 return (
426 426 c.manifest,
427 427 c.user,
428 428 c.date,
429 429 c.files,
430 430 c.description,
431 431 c.extra,
432 432 )
433 433 def manifestnode(self):
434 434 return self._changeset.manifest
435 435
436 436 def user(self):
437 437 return self._changeset.user
438 438 def date(self):
439 439 return self._changeset.date
440 440 def files(self):
441 441 return self._changeset.files
442 442 def description(self):
443 443 return self._changeset.description
444 444 def branch(self):
445 445 return encoding.tolocal(self._changeset.extra.get("branch"))
446 446 def closesbranch(self):
447 447 return 'close' in self._changeset.extra
448 448 def extra(self):
449 449 """Return a dict of extra information."""
450 450 return self._changeset.extra
451 451 def tags(self):
452 452 """Return a list of byte tag names"""
453 453 return self._repo.nodetags(self._node)
454 454 def bookmarks(self):
455 455 """Return a list of byte bookmark names."""
456 456 return self._repo.nodebookmarks(self._node)
457 457 def phase(self):
458 458 return self._repo._phasecache.phase(self._repo, self._rev)
459 459 def hidden(self):
460 460 return self._rev in repoview.filterrevs(self._repo, 'visible')
461 461
462 462 def isinmemory(self):
463 463 return False
464 464
465 465 def children(self):
466 466 """return list of changectx contexts for each child changeset.
467 467
468 468 This returns only the immediate child changesets. Use descendants() to
469 469 recursively walk children.
470 470 """
471 471 c = self._repo.changelog.children(self._node)
472 472 return [self._repo[x] for x in c]
473 473
474 474 def ancestors(self):
475 475 for a in self._repo.changelog.ancestors([self._rev]):
476 476 yield self._repo[a]
477 477
478 478 def descendants(self):
479 479 """Recursively yield all children of the changeset.
480 480
481 481 For just the immediate children, use children()
482 482 """
483 483 for d in self._repo.changelog.descendants([self._rev]):
484 484 yield self._repo[d]
485 485
486 486 def filectx(self, path, fileid=None, filelog=None):
487 487 """get a file context from this changeset"""
488 488 if fileid is None:
489 489 fileid = self.filenode(path)
490 490 return filectx(self._repo, path, fileid=fileid,
491 491 changectx=self, filelog=filelog)
492 492
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # a working context has no node; use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # several "best" ancestors: let configuration pick one when it
            # names a member of cahs; otherwise defer to the revlog
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured candidate matched
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]
528 528
529 529 def isancestorof(self, other):
530 530 """True if this changeset is an ancestor of other"""
531 531 return self._repo.changelog.isancestorrev(self._rev, other._rev)
532 532
533 533 def walk(self, match):
534 534 '''Generates matching file names.'''
535 535
536 536 # Wrap match.bad method to have message with nodeid
537 537 def bad(fn, msg):
538 538 # The manifest doesn't know about subrepos, so don't complain about
539 539 # paths into valid subrepos.
540 540 if any(fn == s or fn.startswith(s + '/')
541 541 for s in self.substate):
542 542 return
543 543 match.bad(fn, _('no such file in rev %s') % self)
544 544
545 545 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
546 546 return self._manifest.walk(m)
547 547
548 548 def matches(self, match):
549 549 return self.walk(match)
550 550
551 551 class basefilectx(object):
552 552 """A filecontext object represents the common logic for its children:
553 553 filectx: read-only access to a filerevision that is already present
554 554 in the repo,
555 555 workingfilectx: a filecontext that represents files from the working
556 556 directory,
557 557 memfilectx: a filecontext that represents files in-memory,
558 558 """
559 559 @propertycache
560 560 def _filelog(self):
561 561 return self._repo.file(self._path)
562 562
563 563 @propertycache
564 564 def _changeid(self):
565 565 if r'_changectx' in self.__dict__:
566 566 return self._changectx.rev()
567 567 elif r'_descendantrev' in self.__dict__:
568 568 # this file context was created from a revision with a known
569 569 # descendant, we can (lazily) correct for linkrev aliases
570 570 return self._adjustlinkrev(self._descendantrev)
571 571 else:
572 572 return self._filelog.linkrev(self._filerev)
573 573
574 574 @propertycache
575 575 def _filenode(self):
576 576 if r'_fileid' in self.__dict__:
577 577 return self._filelog.lookup(self._fileid)
578 578 else:
579 579 return self._changectx.filenode(self._path)
580 580
581 581 @propertycache
582 582 def _filerev(self):
583 583 return self._filelog.rev(self._filenode)
584 584
585 585 @propertycache
586 586 def _repopath(self):
587 587 return self._path
588 588
589 589 def __nonzero__(self):
590 590 try:
591 591 self._filenode
592 592 return True
593 593 except error.LookupError:
594 594 # file is missing
595 595 return False
596 596
597 597 __bool__ = __nonzero__
598 598
599 599 def __bytes__(self):
600 600 try:
601 601 return "%s@%s" % (self.path(), self._changectx)
602 602 except error.LookupError:
603 603 return "%s@???" % self.path()
604 604
605 605 __str__ = encoding.strmethod(__bytes__)
606 606
607 607 def __repr__(self):
608 608 return r"<%s %s>" % (type(self).__name__, str(self))
609 609
610 610 def __hash__(self):
611 611 try:
612 612 return hash((self._path, self._filenode))
613 613 except AttributeError:
614 614 return id(self)
615 615
616 616 def __eq__(self, other):
617 617 try:
618 618 return (type(self) == type(other) and self._path == other._path
619 619 and self._filenode == other._filenode)
620 620 except AttributeError:
621 621 return False
622 622
623 623 def __ne__(self, other):
624 624 return not (self == other)
625 625
626 626 def filerev(self):
627 627 return self._filerev
628 628 def filenode(self):
629 629 return self._filenode
630 630 @propertycache
631 631 def _flags(self):
632 632 return self._changectx.flags(self._path)
633 633 def flags(self):
634 634 return self._flags
635 635 def filelog(self):
636 636 return self._filelog
637 637 def rev(self):
638 638 return self._changeid
639 639 def linkrev(self):
640 640 return self._filelog.linkrev(self._filerev)
641 641 def node(self):
642 642 return self._changectx.node()
643 643 def hex(self):
644 644 return self._changectx.hex()
645 645 def user(self):
646 646 return self._changectx.user()
647 647 def date(self):
648 648 return self._changectx.date()
649 649 def files(self):
650 650 return self._changectx.files()
651 651 def description(self):
652 652 return self._changectx.description()
653 653 def branch(self):
654 654 return self._changectx.branch()
655 655 def extra(self):
656 656 return self._changectx.extra()
657 657 def phase(self):
658 658 return self._changectx.phase()
659 659 def phasestr(self):
660 660 return self._changectx.phasestr()
661 661 def obsolete(self):
662 662 return self._changectx.obsolete()
663 663 def instabilities(self):
664 664 return self._changectx.instabilities()
665 665 def manifest(self):
666 666 return self._changectx.manifest()
667 667 def changectx(self):
668 668 return self._changectx
669 669 def renamed(self):
670 670 return self._copied
671 671 def repo(self):
672 672 return self._repo
673 673 def size(self):
674 674 return len(self.data())
675 675
676 676 def path(self):
677 677 return self._path
678 678
679 679 def isbinary(self):
680 680 try:
681 681 return stringutil.binary(self.data())
682 682 except IOError:
683 683 return False
684 684 def isexec(self):
685 685 return 'x' in self.flags()
686 686 def islink(self):
687 687 return 'l' in self.flags()
688 688
689 689 def isabsent(self):
690 690 """whether this filectx represents a file not in self._changectx
691 691
692 692 This is mainly for merge code to detect change/delete conflicts. This is
693 693 expected to be True for all subclasses of basectx."""
694 694 return False
695 695
    # flag checked by cmp() below; a context carrying custom comparison
    # logic sets this to True so that cmp() defers to it
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the side with custom comparison logic drive the check
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            # sizes match (or could, after the metadata/filter adjustments
            # above), so an actual content comparison is needed
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ: contents must differ
        return True
714 714
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog: linkrevs may point at hidden revisions
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the source revision is the linkrev itself
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
762 762
763 763 def introrev(self):
764 764 """return the rev of the changeset which introduced this file revision
765 765
766 766 This method is different from linkrev because it take into account the
767 767 changeset the filectx was created from. It ensures the returned
768 768 revision is one of its ancestors. This prevents bugs from
769 769 'linkrev-shadowing' when a file revision is used by multiple
770 770 changesets.
771 771 """
772 toprev = None
772 773 attrs = vars(self)
773 hastoprev = (r'_changeid' in attrs or r'_changectx' in attrs)
774 if hastoprev:
775 return self._adjustlinkrev(self.rev(), inclusive=True)
774 if r'_changeid' in attrs:
775 # We have a cached value already
776 toprev = self._changeid
777 elif r'_changectx' in attrs:
778 # We know which changelog entry we are coming from
779 toprev = self._changectx.rev()
780
781 if toprev is not None:
782 return self._adjustlinkrev(toprev, inclusive=True)
776 783 else:
777 784 return self.linkrev()
778 785
779 786 def introfilectx(self):
780 787 """Return filectx having identical contents, but pointing to the
781 788 changeset revision where this filectx was introduced"""
782 789 introrev = self.introrev()
783 790 if self.rev() == introrev:
784 791 return self
785 792 return self.filectx(self.filenode(), changeid=introrev)
786 793
787 794 def _parentfilectx(self, path, fileid, filelog):
788 795 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
789 796 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
790 797 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
791 798 # If self is associated with a changeset (probably explicitly
792 799 # fed), ensure the created filectx is associated with a
793 800 # changeset that is an ancestor of self.changectx.
794 801 # This lets us later use _adjustlinkrev to get a correct link.
795 802 fctx._descendantrev = self.rev()
796 803 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
797 804 elif r'_descendantrev' in vars(self):
798 805 # Otherwise propagate _descendantrev if we have one associated.
799 806 fctx._descendantrev = self._descendantrev
800 807 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
801 808 return fctx
802 809
803 810 def parents(self):
804 811 _path = self._path
805 812 fl = self._filelog
806 813 parents = self._filelog.parents(self._filenode)
807 814 pl = [(_path, node, fl) for node in parents if node != nullid]
808 815
809 816 r = fl.renamed(self._filenode)
810 817 if r:
811 818 # - In the simple rename case, both parent are nullid, pl is empty.
812 819 # - In case of merge, only one of the parent is null id and should
813 820 # be replaced with the rename information. This parent is -always-
814 821 # the first one.
815 822 #
816 823 # As null id have always been filtered out in the previous list
817 824 # comprehension, inserting to 0 will always result in "replacing
818 825 # first nullid parent with rename information.
819 826 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
820 827
821 828 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
822 829
823 830 def p1(self):
824 831 return self.parents()[0]
825 832
826 833 def p2(self):
827 834 p = self.parents()
828 835 if len(p) == 2:
829 836 return p[1]
830 837 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
831 838
832 839 def annotate(self, follow=False, skiprevs=None, diffopts=None):
833 840 """Returns a list of annotateline objects for each line in the file
834 841
835 842 - line.fctx is the filectx of the node where that line was last changed
836 843 - line.lineno is the line number at the first appearance in the managed
837 844 file
838 845 - line.text is the data on that line (including newline character)
839 846 """
840 847 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
841 848
842 849 def parents(f):
843 850 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
844 851 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
845 852 # from the topmost introrev (= srcrev) down to p.linkrev() if it
846 853 # isn't an ancestor of the srcrev.
847 854 f._changeid
848 855 pl = f.parents()
849 856
850 857 # Don't return renamed parents if we aren't following.
851 858 if not follow:
852 859 pl = [p for p in pl if p.path() == f.path()]
853 860
854 861 # renamed filectx won't have a filelog yet, so set it
855 862 # from the cache to save time
856 863 for p in pl:
857 864 if not r'_filelog' in p.__dict__:
858 865 p._filelog = getlog(p.path())
859 866
860 867 return pl
861 868
862 869 # use linkrev to find the first changeset where self appeared
863 870 base = self.introfilectx()
864 871 if getattr(base, '_ancestrycontext', None) is None:
865 872 cl = self._repo.changelog
866 873 if base.rev() is None:
867 874 # wctx is not inclusive, but works because _ancestrycontext
868 875 # is used to test filelog revisions
869 876 ac = cl.ancestors([p.rev() for p in base.parents()],
870 877 inclusive=True)
871 878 else:
872 879 ac = cl.ancestors([base.rev()], inclusive=True)
873 880 base._ancestrycontext = ac
874 881
875 882 return dagop.annotate(base, parents, skiprevs=skiprevs,
876 883 diffopts=diffopts)
877 884
878 885 def ancestors(self, followfirst=False):
879 886 visit = {}
880 887 c = self
881 888 if followfirst:
882 889 cut = 1
883 890 else:
884 891 cut = None
885 892
886 893 while True:
887 894 for parent in c.parents()[:cut]:
888 895 visit[(parent.linkrev(), parent.filenode())] = parent
889 896 if not visit:
890 897 break
891 898 c = visit.pop(max(visit))
892 899 yield c
893 900
894 901 def decodeddata(self):
895 902 """Returns `data()` after running repository decoding filters.
896 903
897 904 This is often equivalent to how the data would be expressed on disk.
898 905 """
899 906 return self._repo.wwritedata(self.path(), self.data())
900 907
901 908 class filectx(basefilectx):
902 909 """A filecontext object makes access to data related to a particular
903 910 filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning the file revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only seed the caches we were given; the remaining attributes are
        # computed lazily via propertycache on first access
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
926 933
927 934 @propertycache
928 935 def _changectx(self):
929 936 try:
930 937 return self._repo[self._changeid]
931 938 except error.FilteredRepoLookupError:
932 939 # Linkrev may point to any revision in the repository. When the
933 940 # repository is filtered this may lead to `filectx` trying to build
934 941 # `changectx` for filtered revision. In such case we fallback to
935 942 # creating `changectx` on the unfiltered version of the reposition.
936 943 # This fallback should not be an issue because `changectx` from
937 944 # `filectx` are not used in complex operations that care about
938 945 # filtering.
939 946 #
940 947 # This fallback is a cheap and dirty fix that prevent several
941 948 # crashes. It does not ensure the behavior is correct. However the
942 949 # behavior was not correct before filtering either and "incorrect
943 950 # behavior" is seen as better as "crash"
944 951 #
945 952 # Linkrevs have several serious troubles with filtering that are
946 953 # complicated to solve. Proper handling of the issue here should be
947 954 # considered when solving linkrev issue are on the table.
948 955 return self._repo.unfiltered()[self._changeid]
949 956
950 957 def filectx(self, fileid, changeid=None):
951 958 '''opens an arbitrary revision of the file without
952 959 opening a new filelog'''
953 960 return filectx(self._repo, self._path, fileid=fileid,
954 961 filelog=self._filelog, changeid=changeid)
955 962
956 963 def rawdata(self):
957 964 return self._filelog.revision(self._filenode, raw=True)
958 965
959 966 def rawflags(self):
960 967 """low-level revlog flags"""
961 968 return self._filelog.flags(self._filerev)
962 969
963 970 def data(self):
964 971 try:
965 972 return self._filelog.read(self._filenode)
966 973 except error.CensoredNodeError:
967 974 if self._repo.ui.config("censor", "policy") == "ignore":
968 975 return ""
969 976 raise error.Abort(_("censored node: %s") % short(self._filenode),
970 977 hint=_("set censor.policy to ignore errors"))
971 978
972 979 def size(self):
973 980 return self._filelog.size(self._filerev)
974 981
975 982 @propertycache
976 983 def _copied(self):
977 984 """check if file was actually renamed in this changeset revision
978 985
979 986 If rename logged in file revision, we report copy for changeset only
980 987 if file revisions linkrev points back to the changeset in question
981 988 or both changeset parents contain different file revisions.
982 989 """
983 990
984 991 renamed = self._filelog.renamed(self._filenode)
985 992 if not renamed:
986 993 return None
987 994
988 995 if self.rev() == self.linkrev():
989 996 return renamed
990 997
991 998 name = self.path()
992 999 fnode = self._filenode
993 1000 for p in self._changectx.parents():
994 1001 try:
995 1002 if fnode == p.filenode(name):
996 1003 return None
997 1004 except error.LookupError:
998 1005 pass
999 1006 return renamed
1000 1007
1001 1008 def children(self):
1002 1009 # hard for renames
1003 1010 c = self._filelog.children(self._filenode)
1004 1011 return [filectx(self._repo, self._path, fileid=x,
1005 1012 filelog=self._filelog) for x in c]
1006 1013
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # a committable context has no revision/node yet
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            # default the branch to the dirstate's current branch
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "1a2b3c4d5e6f+" -- first parent plus a dirty marker
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # unset unless `changes` was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin the commit date
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # everything touched: modified + added + removed, sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of all parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # a pending commit can never have a lower phase than its parents
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for ``path``, preferring the
        cached manifest when one has already been built."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # files known to the dirstate (not removed) that match
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # parents first, then the rest of the changelog ancestry
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1209 1216
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files (everything in the dirstate except removed)
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the files in `list` for addition; returns rejected paths.

        Emits warnings for missing, oversized, already-tracked or
        unsupported (non-file, non-symlink) paths.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            # the warning threshold is loop-invariant; read the config once
            # instead of once per file
            limit = ui.configbytes('ui', 'large-file-limit')
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking `files`; returns paths that were not tracked."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # freshly added files are simply dropped
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """Restore removed files from a parent's content."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """Record that `dest` is a copy of `source` in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1561 1568
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no committed revision backs this context yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid when the path is absent from the parent manifest
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (source path, source filenode); filelog is resolved lazily
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1608 1615
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read straight from the working directory (with decode filters)
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # mtime of the on-disk file; fall back to the changectx date when
        # the file is missing
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # also remove any file that sits where a parent directory of f
            # should be
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1696 1703
1697 1704 class overlayworkingctx(committablectx):
1698 1705 """Wraps another mutable context with a write-back cache that can be
1699 1706 converted into a commit context.
1700 1707
1701 1708 self._cache[path] maps to a dict with keys: {
1702 1709 'exists': bool?
1703 1710 'date': date?
1704 1711 'data': str?
1705 1712 'flags': str?
1706 1713 'copied': str? (path or None)
1707 1714 }
1708 1715 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1709 1716 is `False`, the file was deleted.
1710 1717 """
1711 1718
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache; a base must be attached
        # with setbase() before the context is usable
        self.clean()
1715 1722
    def setbase(self, wrappedctx):
        # the context all clean reads fall through to, and our sole parent
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')
1723 1730
1724 1731 def data(self, path):
1725 1732 if self.isdirty(path):
1726 1733 if self._cache[path]['exists']:
1727 1734 if self._cache[path]['data']:
1728 1735 return self._cache[path]['data']
1729 1736 else:
1730 1737 # Must fallback here, too, because we only set flags.
1731 1738 return self._wrappedctx[path].data()
1732 1739 else:
1733 1740 raise error.ProgrammingError("No such file or directory: %s" %
1734 1741 path)
1735 1742 else:
1736 1743 return self._wrappedctx[path].data()
1737 1744
1738 1745 @propertycache
1739 1746 def _manifest(self):
1740 1747 parents = self.parents()
1741 1748 man = parents[0].manifest().copy()
1742 1749
1743 1750 flag = self._flagfunc
1744 1751 for path in self.added():
1745 1752 man[path] = addednodeid
1746 1753 man.setflag(path, flag(path))
1747 1754 for path in self.modified():
1748 1755 man[path] = modifiednodeid
1749 1756 man.setflag(path, flag(path))
1750 1757 for path in self.removed():
1751 1758 del man[path]
1752 1759 return man
1753 1760
1754 1761 @propertycache
1755 1762 def _flagfunc(self):
1756 1763 def f(path):
1757 1764 return self._cache[path]['flags']
1758 1765 return f
1759 1766
1760 1767 def files(self):
1761 1768 return sorted(self.added() + self.modified() + self.removed())
1762 1769
1763 1770 def modified(self):
1764 1771 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1765 1772 self._existsinparent(f)]
1766 1773
1767 1774 def added(self):
1768 1775 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1769 1776 not self._existsinparent(f)]
1770 1777
1771 1778 def removed(self):
1772 1779 return [f for f in self._cache.keys() if
1773 1780 not self._cache[f]['exists'] and self._existsinparent(f)]
1774 1781
    def isinmemory(self):
        # This context never touches the filesystem.
        return True
1777 1784
1778 1785 def filedate(self, path):
1779 1786 if self.isdirty(path):
1780 1787 return self._cache[path]['date']
1781 1788 else:
1782 1789 return self._wrappedctx[path].date()
1783 1790
1784 1791 def markcopied(self, path, origin):
1785 1792 if self.isdirty(path):
1786 1793 self._cache[path]['copied'] = origin
1787 1794 else:
1788 1795 raise error.ProgrammingError('markcopied() called on clean context')
1789 1796
1790 1797 def copydata(self, path):
1791 1798 if self.isdirty(path):
1792 1799 return self._cache[path]['copied']
1793 1800 else:
1794 1801 raise error.ProgrammingError('copydata() called on clean context')
1795 1802
1796 1803 def flags(self, path):
1797 1804 if self.isdirty(path):
1798 1805 if self._cache[path]['exists']:
1799 1806 return self._cache[path]['flags']
1800 1807 else:
1801 1808 raise error.ProgrammingError("No such file or directory: %s" %
1802 1809 self._path)
1803 1810 else:
1804 1811 return self._wrappedctx[path].flags()
1805 1812
    def _existsinparent(self, path):
        """True if ``path`` exists in the wrapped context."""
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False
1815 1822
    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            # NOTE(review): indexing ``self._cache[component]`` assumes a
            # conflicting component was already marked dirty; a component
            # present in p1 but never touched in this overlay would raise
            # KeyError here -- verify against callers.
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            # A single match that is the path itself is not a conflict.
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))
1860 1867
    def write(self, path, data, flags='', **kwargs):
        """Record new ``data`` (and ``flags``) for ``path`` in the overlay."""
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        # Reject writes that would conflict with files/directories in p1.
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)
1867 1874
1868 1875 def setflags(self, path, l, x):
1869 1876 flag = ''
1870 1877 if l:
1871 1878 flag = 'l'
1872 1879 elif x:
1873 1880 flag = 'x'
1874 1881 self._markdirty(path, exists=True, date=dateutil.makedate(),
1875 1882 flags=flag)
1876 1883
    def remove(self, path):
        # A deletion is just a cache entry with ``exists=False``.
        self._markdirty(path, exists=False)
1879 1886
1880 1887 def exists(self, path):
1881 1888 """exists behaves like `lexists`, but needs to follow symlinks and
1882 1889 return False if they are broken.
1883 1890 """
1884 1891 if self.isdirty(path):
1885 1892 # If this path exists and is a symlink, "follow" it by calling
1886 1893 # exists on the destination path.
1887 1894 if (self._cache[path]['exists'] and
1888 1895 'l' in self._cache[path]['flags']):
1889 1896 return self.exists(self._cache[path]['data'].strip())
1890 1897 else:
1891 1898 return self._cache[path]['exists']
1892 1899
1893 1900 return self._existsinparent(path)
1894 1901
1895 1902 def lexists(self, path):
1896 1903 """lexists returns True if the path exists"""
1897 1904 if self.isdirty(path):
1898 1905 return self._cache[path]['exists']
1899 1906
1900 1907 return self._existsinparent(path)
1901 1908
1902 1909 def size(self, path):
1903 1910 if self.isdirty(path):
1904 1911 if self._cache[path]['exists']:
1905 1912 return len(self._cache[path]['data'])
1906 1913 else:
1907 1914 raise error.ProgrammingError("No such file or directory: %s" %
1908 1915 self._path)
1909 1916 return self._wrappedctx[path].size()
1910 1917
    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
1945 1952
    def isdirty(self, path):
        """True if ``path`` has a pending change recorded in this overlay."""
        return path in self._cache
1948 1955
1949 1956 def isempty(self):
1950 1957 # We need to discard any keys that are actually clean before the empty
1951 1958 # commit check.
1952 1959 self._compact()
1953 1960 return len(self._cache) == 0
1954 1961
    def clean(self):
        # Discard every pending change.
        self._cache = {}
1957 1964
1958 1965 def _compact(self):
1959 1966 """Removes keys from the cache that are actually clean, by comparing
1960 1967 them with the underlying context.
1961 1968
1962 1969 This can occur during the merge process, e.g. by passing --tool :local
1963 1970 to resolve a conflict.
1964 1971 """
1965 1972 keys = []
1966 1973 for path in self._cache.keys():
1967 1974 cache = self._cache[path]
1968 1975 try:
1969 1976 underlying = self._wrappedctx[path]
1970 1977 if (underlying.data() == cache['data'] and
1971 1978 underlying.flags() == cache['flags']):
1972 1979 keys.append(path)
1973 1980 except error.ManifestLookupError:
1974 1981 # Path not in the underlying manifest (created).
1975 1982 continue
1976 1983
1977 1984 for path in keys:
1978 1985 del self._cache[path]
1979 1986 return keys
1980 1987
1981 1988 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1982 1989 # data not provided, let's see if we already have some; if not, let's
1983 1990 # grab it from our underlying context, so that we always have data if
1984 1991 # the file is marked as existing.
1985 1992 if exists and data is None:
1986 1993 oldentry = self._cache.get(path) or {}
1987 1994 data = oldentry.get('data') or self._wrappedctx[path].data()
1988 1995
1989 1996 self._cache[path] = {
1990 1997 'exists': exists,
1991 1998 'data': data,
1992 1999 'date': date,
1993 2000 'flags': flags,
1994 2001 'copied': None,
1995 2002 }
1996 2003
    def filectx(self, path, filelog=None):
        """Return an ``overlayworkingfilectx`` for ``path`` bound to self."""
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2000 2007
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    All state lives on the owning ``overlayworkingctx`` (``parent``); the
    methods below are thin delegations keyed by this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ (opposite convention to filecmp.cmp).
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copy source recorded on the parent overlay, if any.
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem path to audit for an in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass
2059 2066
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # "clean" = tracked files not part of this commit
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2095 2102
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: compute and remember the result on the first miss only.
        try:
            return cache[path]
        except KeyError:
            cache[path] = func(repo, memctx, path)
            return cache[path]

    return getfilectx
2111 2118
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.  The returned callable matches the filectxfn(repo, memctx,
    path) signature expected by ``memctx``.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2130 2137
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    The returned callable matches the filectxfn(repo, memctx, path)
    signature expected by ``memctx``.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # File removed by the patch; memctx records this as a deletion.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2145 2152
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Missing parents are normalized to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # Sentinel node ids; real nodes are assigned at commit time.
        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2261 2268
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode flags the way manifests do: symlink wins over exec.
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2299 2306
2300 2307
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # Pad with the null revision so there are always exactly two parents.
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2395 2402
class arbitraryfilectx(object):
    """Filectx-like access to a file at an arbitrary on-disk location,
    possibly outside the working directory.
    """
    def __init__(self, path, repo=None):
        # repo may be None: contrib/simplemerge instantiates this class
        # without a repository.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so the
        # disk-backed fast path is only valid when neither side is one.
        has_symlink = 'l' in self.flags() or 'l' in fctx.flags()
        if self._repo and not has_symlink and isinstance(fctx, workingfilectx):
            # Both sides are disk-backed: compare the files directly.
            # Note that filecmp uses the opposite return values (True if
            # same) from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Flags are not tracked for arbitrary on-disk paths.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now