context: take advantage of `_descendantrev` in introrev if available...
Boris Feld
r40729:aee94f0a default
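
The change below touches basefilectx.introrev(): when a file context carries only a `_descendantrev` (a revision known to descend from the one that introduced the file), introrev() now resolves the introducing changeset through `_adjustlinkrev()` and caches the result, instead of falling back to the raw linkrev, which can point at a changeset that is not an ancestor when several changesets reuse the same file revision (linkrev shadowing). Below is a minimal, self-contained sketch of that lookup order; the class name `sketchfilectx` is a hypothetical stand-in, `_adjustlinkrev()` is stubbed out, and the `_changectx` branch is omitted for brevity, so this is not Mercurial's API.

# Illustrative stand-in for basefilectx, not Mercurial's API: the real
# _adjustlinkrev() walks the changelog down from `srcrev` to find the
# ancestor that actually introduced the file revision.
class sketchfilectx(object):
    def __init__(self, changeid=None, descendantrev=None, linkrev=0):
        if changeid is not None:
            self._changeid = changeid
        if descendantrev is not None:
            self._descendantrev = descendantrev
        self._linkrev = linkrev

    def _adjustlinkrev(self, srcrev, inclusive=False):
        return srcrev  # stub: pretend srcrev already introduced the file

    def linkrev(self):
        return self._linkrev

    def introrev(self):
        attrs = vars(self)
        toprev = None
        if '_changeid' in attrs:
            toprev = self._changeid  # cached value, cheapest path
        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True)
        elif '_descendantrev' in attrs:
            # New branch added by this changeset: adjust from the known
            # descendant instead of trusting the raw linkrev, then cache.
            introrev = self._adjustlinkrev(self._descendantrev)
            self._changeid = introrev
            return introrev
        else:
            return self.linkrev()  # previous behaviour for this case

print(sketchfilectx(descendantrev=42).introrev())  # 42, via the new branch
print(sketchfilectx(linkrev=7).introrev())         # 7, the old fallback
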
@@ -1,2442 +1,2447 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirid,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58
59 59 def __init__(self, repo):
60 60 self._repo = repo
61 61
62 62 def __bytes__(self):
63 63 return short(self.node())
64 64
65 65 __str__ = encoding.strmethod(__bytes__)
66 66
67 67 def __repr__(self):
68 68 return r"<%s %s>" % (type(self).__name__, str(self))
69 69
70 70 def __eq__(self, other):
71 71 try:
72 72 return type(self) == type(other) and self._rev == other._rev
73 73 except AttributeError:
74 74 return False
75 75
76 76 def __ne__(self, other):
77 77 return not (self == other)
78 78
79 79 def __contains__(self, key):
80 80 return key in self._manifest
81 81
82 82 def __getitem__(self, key):
83 83 return self.filectx(key)
84 84
85 85 def __iter__(self):
86 86 return iter(self._manifest)
87 87
88 88 def _buildstatusmanifest(self, status):
89 89 """Builds a manifest that includes the given status results, if this is
90 90 a working copy context. For non-working copy contexts, it just returns
91 91 the normal manifest."""
92 92 return self.manifest()
93 93
94 94 def _matchstatus(self, other, match):
95 95 """This internal method provides a way for child objects to override the
96 96 match operator.
97 97 """
98 98 return match
99 99
100 100 def _buildstatus(self, other, s, match, listignored, listclean,
101 101 listunknown):
102 102 """build a status with respect to another context"""
103 103 # Load earliest manifest first for caching reasons. More specifically,
104 104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 107 # delta to what's in the cache. So that's one full reconstruction + one
108 108 # delta application.
109 109 mf2 = None
110 110 if self.rev() is not None and self.rev() < other.rev():
111 111 mf2 = self._buildstatusmanifest(s)
112 112 mf1 = other._buildstatusmanifest(s)
113 113 if mf2 is None:
114 114 mf2 = self._buildstatusmanifest(s)
115 115
116 116 modified, added = [], []
117 117 removed = []
118 118 clean = []
119 119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 120 deletedset = set(deleted)
121 121 d = mf1.diff(mf2, match=match, clean=listclean)
122 122 for fn, value in d.iteritems():
123 123 if fn in deletedset:
124 124 continue
125 125 if value is None:
126 126 clean.append(fn)
127 127 continue
128 128 (node1, flag1), (node2, flag2) = value
129 129 if node1 is None:
130 130 added.append(fn)
131 131 elif node2 is None:
132 132 removed.append(fn)
133 133 elif flag1 != flag2:
134 134 modified.append(fn)
135 135 elif node2 not in wdirfilenodeids:
136 136 # When comparing files between two commits, we save time by
137 137 # not comparing the file contents when the nodeids differ.
138 138 # Note that this means we incorrectly report a reverted change
139 139 # to a file as a modification.
140 140 modified.append(fn)
141 141 elif self[fn].cmp(other[fn]):
142 142 modified.append(fn)
143 143 else:
144 144 clean.append(fn)
145 145
146 146 if removed:
147 147 # need to filter files if they are already reported as removed
148 148 unknown = [fn for fn in unknown if fn not in mf1 and
149 149 (not match or match(fn))]
150 150 ignored = [fn for fn in ignored if fn not in mf1 and
151 151 (not match or match(fn))]
152 152 # if they're deleted, don't report them as removed
153 153 removed = [fn for fn in removed if fn not in deletedset]
154 154
155 155 return scmutil.status(modified, added, removed, deleted, unknown,
156 156 ignored, clean)
157 157
158 158 @propertycache
159 159 def substate(self):
160 160 return subrepoutil.state(self, self._repo.ui)
161 161
162 162 def subrev(self, subpath):
163 163 return self.substate[subpath][1]
164 164
165 165 def rev(self):
166 166 return self._rev
167 167 def node(self):
168 168 return self._node
169 169 def hex(self):
170 170 return hex(self.node())
171 171 def manifest(self):
172 172 return self._manifest
173 173 def manifestctx(self):
174 174 return self._manifestctx
175 175 def repo(self):
176 176 return self._repo
177 177 def phasestr(self):
178 178 return phases.phasenames[self.phase()]
179 179 def mutable(self):
180 180 return self.phase() > phases.public
181 181
182 182 def matchfileset(self, expr, badfn=None):
183 183 return fileset.match(self, expr, badfn=badfn)
184 184
185 185 def obsolete(self):
186 186 """True if the changeset is obsolete"""
187 187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 188
189 189 def extinct(self):
190 190 """True if the changeset is extinct"""
191 191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 192
193 193 def orphan(self):
194 194 """True if the changeset is not obsolete, but its ancestor is"""
195 195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 196
197 197 def phasedivergent(self):
198 198 """True if the changeset tries to be a successor of a public changeset
199 199
200 200 Only non-public and non-obsolete changesets may be phase-divergent.
201 201 """
202 202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 203
204 204 def contentdivergent(self):
205 205 """Is a successor of a changeset with multiple possible successor sets
206 206
207 207 Only non-public and non-obsolete changesets may be content-divergent.
208 208 """
209 209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 210
211 211 def isunstable(self):
212 212 """True if the changeset is either orphan, phase-divergent or
213 213 content-divergent"""
214 214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 215
216 216 def instabilities(self):
217 217 """return the list of instabilities affecting this changeset.
218 218
219 219 Instabilities are returned as strings. Possible values are:
220 220 - orphan,
221 221 - phase-divergent,
222 222 - content-divergent.
223 223 """
224 224 instabilities = []
225 225 if self.orphan():
226 226 instabilities.append('orphan')
227 227 if self.phasedivergent():
228 228 instabilities.append('phase-divergent')
229 229 if self.contentdivergent():
230 230 instabilities.append('content-divergent')
231 231 return instabilities
232 232
233 233 def parents(self):
234 234 """return contexts for each parent changeset"""
235 235 return self._parents
236 236
237 237 def p1(self):
238 238 return self._parents[0]
239 239
240 240 def p2(self):
241 241 parents = self._parents
242 242 if len(parents) == 2:
243 243 return parents[1]
244 244 return self._repo[nullrev]
245 245
246 246 def _fileinfo(self, path):
247 247 if r'_manifest' in self.__dict__:
248 248 try:
249 249 return self._manifest[path], self._manifest.flags(path)
250 250 except KeyError:
251 251 raise error.ManifestLookupError(self._node, path,
252 252 _('not found in manifest'))
253 253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 254 if path in self._manifestdelta:
255 255 return (self._manifestdelta[path],
256 256 self._manifestdelta.flags(path))
257 257 mfl = self._repo.manifestlog
258 258 try:
259 259 node, flag = mfl[self._changeset.manifest].find(path)
260 260 except KeyError:
261 261 raise error.ManifestLookupError(self._node, path,
262 262 _('not found in manifest'))
263 263
264 264 return node, flag
265 265
266 266 def filenode(self, path):
267 267 return self._fileinfo(path)[0]
268 268
269 269 def flags(self, path):
270 270 try:
271 271 return self._fileinfo(path)[1]
272 272 except error.LookupError:
273 273 return ''
274 274
275 275 def sub(self, path, allowcreate=True):
276 276 '''return a subrepo for the stored revision of path, never wdir()'''
277 277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278 278
279 279 def nullsub(self, path, pctx):
280 280 return subrepo.nullsubrepo(self, path, pctx)
281 281
282 282 def workingsub(self, path):
283 283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 284 context.
285 285 '''
286 286 return subrepo.subrepo(self, path, allowwdir=True)
287 287
288 288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 289 listsubrepos=False, badfn=None):
290 290 r = self._repo
291 291 return matchmod.match(r.root, r.getcwd(), pats,
292 292 include, exclude, default,
293 293 auditor=r.nofsauditor, ctx=self,
294 294 listsubrepos=listsubrepos, badfn=badfn)
295 295
296 296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 297 losedatafn=None, prefix='', relroot='', copy=None,
298 298 hunksfilterfn=None):
299 299 """Returns a diff generator for the given contexts and matcher"""
300 300 if ctx2 is None:
301 301 ctx2 = self.p1()
302 302 if ctx2 is not None:
303 303 ctx2 = self._repo[ctx2]
304 304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 305 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 306 relroot=relroot, copy=copy,
307 307 hunksfilterfn=hunksfilterfn)
308 308
309 309 def dirs(self):
310 310 return self._manifest.dirs()
311 311
312 312 def hasdir(self, dir):
313 313 return self._manifest.hasdir(dir)
314 314
315 315 def status(self, other=None, match=None, listignored=False,
316 316 listclean=False, listunknown=False, listsubrepos=False):
317 317 """return status of files between two nodes or node and working
318 318 directory.
319 319
320 320 If other is None, compare this node with working directory.
321 321
322 322 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 323 """
324 324
325 325 ctx1 = self
326 326 ctx2 = self._repo[other]
327 327
328 328 # This next code block is, admittedly, fragile logic that tests for
329 329 # reversing the contexts and wouldn't need to exist if it weren't for
330 330 # the fast (and common) code path of comparing the working directory
331 331 # with its first parent.
332 332 #
333 333 # What we're aiming for here is the ability to call:
334 334 #
335 335 # workingctx.status(parentctx)
336 336 #
337 337 # If we always built the manifest for each context and compared those,
338 338 # then we'd be done. But the special case of the above call means we
339 339 # just copy the manifest of the parent.
340 340 reversed = False
341 341 if (not isinstance(ctx1, changectx)
342 342 and isinstance(ctx2, changectx)):
343 343 reversed = True
344 344 ctx1, ctx2 = ctx2, ctx1
345 345
346 346 match = self._repo.narrowmatch(match)
347 347 match = ctx2._matchstatus(ctx1, match)
348 348 r = scmutil.status([], [], [], [], [], [], [])
349 349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 350 listunknown)
351 351
352 352 if reversed:
353 353 # Reverse added and removed. Clear deleted, unknown and ignored as
354 354 # these make no sense to reverse.
355 355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 356 r.clean)
357 357
358 358 if listsubrepos:
359 359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 360 try:
361 361 rev2 = ctx2.subrev(subpath)
362 362 except KeyError:
363 363 # A subrepo that existed in node1 was deleted between
364 364 # node1 and node2 (inclusive). Thus, ctx2's substate
365 365 # won't contain that subpath. The best we can do is ignore it.
366 366 rev2 = None
367 367 submatch = matchmod.subdirmatcher(subpath, match)
368 368 s = sub.status(rev2, match=submatch, ignored=listignored,
369 369 clean=listclean, unknown=listunknown,
370 370 listsubrepos=True)
371 371 for rfiles, sfiles in zip(r, s):
372 372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 373
374 374 for l in r:
375 375 l.sort()
376 376
377 377 return r
378 378
379 379 class changectx(basectx):
380 380 """A changecontext object makes access to data related to a particular
381 381 changeset convenient. It represents a read-only context already present in
382 382 the repo."""
383 383 def __init__(self, repo, rev, node):
384 384 super(changectx, self).__init__(repo)
385 385 self._rev = rev
386 386 self._node = node
387 387
388 388 def __hash__(self):
389 389 try:
390 390 return hash(self._rev)
391 391 except AttributeError:
392 392 return id(self)
393 393
394 394 def __nonzero__(self):
395 395 return self._rev != nullrev
396 396
397 397 __bool__ = __nonzero__
398 398
399 399 @propertycache
400 400 def _changeset(self):
401 401 return self._repo.changelog.changelogrevision(self.rev())
402 402
403 403 @propertycache
404 404 def _manifest(self):
405 405 return self._manifestctx.read()
406 406
407 407 @property
408 408 def _manifestctx(self):
409 409 return self._repo.manifestlog[self._changeset.manifest]
410 410
411 411 @propertycache
412 412 def _manifestdelta(self):
413 413 return self._manifestctx.readdelta()
414 414
415 415 @propertycache
416 416 def _parents(self):
417 417 repo = self._repo
418 418 p1, p2 = repo.changelog.parentrevs(self._rev)
419 419 if p2 == nullrev:
420 420 return [repo[p1]]
421 421 return [repo[p1], repo[p2]]
422 422
423 423 def changeset(self):
424 424 c = self._changeset
425 425 return (
426 426 c.manifest,
427 427 c.user,
428 428 c.date,
429 429 c.files,
430 430 c.description,
431 431 c.extra,
432 432 )
433 433 def manifestnode(self):
434 434 return self._changeset.manifest
435 435
436 436 def user(self):
437 437 return self._changeset.user
438 438 def date(self):
439 439 return self._changeset.date
440 440 def files(self):
441 441 return self._changeset.files
442 442 def description(self):
443 443 return self._changeset.description
444 444 def branch(self):
445 445 return encoding.tolocal(self._changeset.extra.get("branch"))
446 446 def closesbranch(self):
447 447 return 'close' in self._changeset.extra
448 448 def extra(self):
449 449 """Return a dict of extra information."""
450 450 return self._changeset.extra
451 451 def tags(self):
452 452 """Return a list of byte tag names"""
453 453 return self._repo.nodetags(self._node)
454 454 def bookmarks(self):
455 455 """Return a list of byte bookmark names."""
456 456 return self._repo.nodebookmarks(self._node)
457 457 def phase(self):
458 458 return self._repo._phasecache.phase(self._repo, self._rev)
459 459 def hidden(self):
460 460 return self._rev in repoview.filterrevs(self._repo, 'visible')
461 461
462 462 def isinmemory(self):
463 463 return False
464 464
465 465 def children(self):
466 466 """return list of changectx contexts for each child changeset.
467 467
468 468 This returns only the immediate child changesets. Use descendants() to
469 469 recursively walk children.
470 470 """
471 471 c = self._repo.changelog.children(self._node)
472 472 return [self._repo[x] for x in c]
473 473
474 474 def ancestors(self):
475 475 for a in self._repo.changelog.ancestors([self._rev]):
476 476 yield self._repo[a]
477 477
478 478 def descendants(self):
479 479 """Recursively yield all children of the changeset.
480 480
481 481 For just the immediate children, use children()
482 482 """
483 483 for d in self._repo.changelog.descendants([self._rev]):
484 484 yield self._repo[d]
485 485
486 486 def filectx(self, path, fileid=None, filelog=None):
487 487 """get a file context from this changeset"""
488 488 if fileid is None:
489 489 fileid = self.filenode(path)
490 490 return filectx(self._repo, path, fileid=fileid,
491 491 changectx=self, filelog=filelog)
492 492
493 493 def ancestor(self, c2, warn=False):
494 494 """return the "best" ancestor context of self and c2
495 495
496 496 If there are multiple candidates, it will show a message and check
497 497 merge.preferancestor configuration before falling back to the
498 498 revlog ancestor."""
499 499 # deal with workingctxs
500 500 n2 = c2._node
501 501 if n2 is None:
502 502 n2 = c2._parents[0]._node
503 503 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
504 504 if not cahs:
505 505 anc = nullid
506 506 elif len(cahs) == 1:
507 507 anc = cahs[0]
508 508 else:
509 509 # experimental config: merge.preferancestor
510 510 for r in self._repo.ui.configlist('merge', 'preferancestor'):
511 511 try:
512 512 ctx = scmutil.revsymbol(self._repo, r)
513 513 except error.RepoLookupError:
514 514 continue
515 515 anc = ctx.node()
516 516 if anc in cahs:
517 517 break
518 518 else:
519 519 anc = self._repo.changelog.ancestor(self._node, n2)
520 520 if warn:
521 521 self._repo.ui.status(
522 522 (_("note: using %s as ancestor of %s and %s\n") %
523 523 (short(anc), short(self._node), short(n2))) +
524 524 ''.join(_(" alternatively, use --config "
525 525 "merge.preferancestor=%s\n") %
526 526 short(n) for n in sorted(cahs) if n != anc))
527 527 return self._repo[anc]
528 528
529 529 def isancestorof(self, other):
530 530 """True if this changeset is an ancestor of other"""
531 531 return self._repo.changelog.isancestorrev(self._rev, other._rev)
532 532
533 533 def walk(self, match):
534 534 '''Generates matching file names.'''
535 535
536 536 # Wrap match.bad method to have message with nodeid
537 537 def bad(fn, msg):
538 538 # The manifest doesn't know about subrepos, so don't complain about
539 539 # paths into valid subrepos.
540 540 if any(fn == s or fn.startswith(s + '/')
541 541 for s in self.substate):
542 542 return
543 543 match.bad(fn, _('no such file in rev %s') % self)
544 544
545 545 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
546 546 return self._manifest.walk(m)
547 547
548 548 def matches(self, match):
549 549 return self.walk(match)
550 550
551 551 class basefilectx(object):
552 552 """A filecontext object represents the common logic for its children:
553 553 filectx: read-only access to a filerevision that is already present
554 554 in the repo,
555 555 workingfilectx: a filecontext that represents files from the working
556 556 directory,
557 557 memfilectx: a filecontext that represents files in-memory,
558 558 """
559 559 @propertycache
560 560 def _filelog(self):
561 561 return self._repo.file(self._path)
562 562
563 563 @propertycache
564 564 def _changeid(self):
565 565 if r'_changectx' in self.__dict__:
566 566 return self._changectx.rev()
567 567 elif r'_descendantrev' in self.__dict__:
568 568 # this file context was created from a revision with a known
569 569 # descendant, we can (lazily) correct for linkrev aliases
570 570 return self._adjustlinkrev(self._descendantrev)
571 571 else:
572 572 return self._filelog.linkrev(self._filerev)
573 573
574 574 @propertycache
575 575 def _filenode(self):
576 576 if r'_fileid' in self.__dict__:
577 577 return self._filelog.lookup(self._fileid)
578 578 else:
579 579 return self._changectx.filenode(self._path)
580 580
581 581 @propertycache
582 582 def _filerev(self):
583 583 return self._filelog.rev(self._filenode)
584 584
585 585 @propertycache
586 586 def _repopath(self):
587 587 return self._path
588 588
589 589 def __nonzero__(self):
590 590 try:
591 591 self._filenode
592 592 return True
593 593 except error.LookupError:
594 594 # file is missing
595 595 return False
596 596
597 597 __bool__ = __nonzero__
598 598
599 599 def __bytes__(self):
600 600 try:
601 601 return "%s@%s" % (self.path(), self._changectx)
602 602 except error.LookupError:
603 603 return "%s@???" % self.path()
604 604
605 605 __str__ = encoding.strmethod(__bytes__)
606 606
607 607 def __repr__(self):
608 608 return r"<%s %s>" % (type(self).__name__, str(self))
609 609
610 610 def __hash__(self):
611 611 try:
612 612 return hash((self._path, self._filenode))
613 613 except AttributeError:
614 614 return id(self)
615 615
616 616 def __eq__(self, other):
617 617 try:
618 618 return (type(self) == type(other) and self._path == other._path
619 619 and self._filenode == other._filenode)
620 620 except AttributeError:
621 621 return False
622 622
623 623 def __ne__(self, other):
624 624 return not (self == other)
625 625
626 626 def filerev(self):
627 627 return self._filerev
628 628 def filenode(self):
629 629 return self._filenode
630 630 @propertycache
631 631 def _flags(self):
632 632 return self._changectx.flags(self._path)
633 633 def flags(self):
634 634 return self._flags
635 635 def filelog(self):
636 636 return self._filelog
637 637 def rev(self):
638 638 return self._changeid
639 639 def linkrev(self):
640 640 return self._filelog.linkrev(self._filerev)
641 641 def node(self):
642 642 return self._changectx.node()
643 643 def hex(self):
644 644 return self._changectx.hex()
645 645 def user(self):
646 646 return self._changectx.user()
647 647 def date(self):
648 648 return self._changectx.date()
649 649 def files(self):
650 650 return self._changectx.files()
651 651 def description(self):
652 652 return self._changectx.description()
653 653 def branch(self):
654 654 return self._changectx.branch()
655 655 def extra(self):
656 656 return self._changectx.extra()
657 657 def phase(self):
658 658 return self._changectx.phase()
659 659 def phasestr(self):
660 660 return self._changectx.phasestr()
661 661 def obsolete(self):
662 662 return self._changectx.obsolete()
663 663 def instabilities(self):
664 664 return self._changectx.instabilities()
665 665 def manifest(self):
666 666 return self._changectx.manifest()
667 667 def changectx(self):
668 668 return self._changectx
669 669 def renamed(self):
670 670 return self._copied
671 671 def repo(self):
672 672 return self._repo
673 673 def size(self):
674 674 return len(self.data())
675 675
676 676 def path(self):
677 677 return self._path
678 678
679 679 def isbinary(self):
680 680 try:
681 681 return stringutil.binary(self.data())
682 682 except IOError:
683 683 return False
684 684 def isexec(self):
685 685 return 'x' in self.flags()
686 686 def islink(self):
687 687 return 'l' in self.flags()
688 688
689 689 def isabsent(self):
690 690 """whether this filectx represents a file not in self._changectx
691 691
692 692 This is mainly for merge code to detect change/delete conflicts. This is
693 693 expected to be True for all subclasses of basectx."""
694 694 return False
695 695
696 696 _customcmp = False
697 697 def cmp(self, fctx):
698 698 """compare with other file context
699 699
700 700 returns True if different than fctx.
701 701 """
702 702 if fctx._customcmp:
703 703 return fctx.cmp(self)
704 704
705 705 if (fctx._filenode is None
706 706 and (self._repo._encodefilterpats
707 707 # if file data starts with '\1\n', empty metadata block is
708 708 # prepended, which adds 4 bytes to filelog.size().
709 709 or self.size() - 4 == fctx.size())
710 710 or self.size() == fctx.size()):
711 711 return self._filelog.cmp(self._filenode, fctx.data())
712 712
713 713 return True
714 714
715 715 def _adjustlinkrev(self, srcrev, inclusive=False):
716 716 """return the first ancestor of <srcrev> introducing <fnode>
717 717
718 718 If the linkrev of the file revision does not point to an ancestor of
719 719 srcrev, we'll walk down the ancestors until we find one introducing
720 720 this file revision.
721 721
722 722 :srcrev: the changeset revision we search ancestors from
723 723 :inclusive: if true, the src revision will also be checked
724 724 """
725 725 repo = self._repo
726 726 cl = repo.unfiltered().changelog
727 727 mfl = repo.manifestlog
728 728 # fetch the linkrev
729 729 lkr = self.linkrev()
730 730 if srcrev == lkr:
731 731 return lkr
732 732 # hack to reuse ancestor computation when searching for renames
733 733 memberanc = getattr(self, '_ancestrycontext', None)
734 734 iteranc = None
735 735 if srcrev is None:
736 736 # wctx case, used by workingfilectx during mergecopy
737 737 revs = [p.rev() for p in self._repo[None].parents()]
738 738 inclusive = True # we skipped the real (revless) source
739 739 else:
740 740 revs = [srcrev]
741 741 if memberanc is None:
742 742 memberanc = iteranc = cl.ancestors(revs, lkr,
743 743 inclusive=inclusive)
744 744 # check if this linkrev is an ancestor of srcrev
745 745 if lkr not in memberanc:
746 746 if iteranc is None:
747 747 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
748 748 fnode = self._filenode
749 749 path = self._path
750 750 for a in iteranc:
751 751 ac = cl.read(a) # get changeset data (we avoid object creation)
752 752 if path in ac[3]: # checking the 'files' field.
753 753 # The file has been touched, check if the content is
754 754 # similar to the one we search for.
755 755 if fnode == mfl[ac[0]].readfast().get(path):
756 756 return a
757 757 # In theory, we should never get out of that loop without a result.
758 758 # But if manifest uses a buggy file revision (not children of the
759 759 # one it replaces) we could. Such a buggy situation will likely
760 760 # result is crash somewhere else at to some point.
761 761 return lkr
762 762
763 763 def introrev(self):
764 764 """return the rev of the changeset which introduced this file revision
765 765
766 766 This method is different from linkrev because it takes into account the
767 767 changeset the filectx was created from. It ensures the returned
768 768 revision is one of its ancestors. This prevents bugs from
769 769 'linkrev-shadowing' when a file revision is used by multiple
770 770 changesets.
771 771 """
772 772 toprev = None
773 773 attrs = vars(self)
774 774 if r'_changeid' in attrs:
775 775 # We have a cached value already
776 776 toprev = self._changeid
777 777 elif r'_changectx' in attrs:
778 778 # We know which changelog entry we are coming from
779 779 toprev = self._changectx.rev()
780 780
781 781 if toprev is not None:
782 782 return self._adjustlinkrev(toprev, inclusive=True)
783 elif r'_descendantrev' in attrs:
784 introrev = self._adjustlinkrev(self._descendantrev)
785 # be nice and cache the result of the computation
786 self._changeid = introrev
787 return introrev
783 788 else:
784 789 return self.linkrev()
785 790
786 791 def introfilectx(self):
787 792 """Return filectx having identical contents, but pointing to the
788 793 changeset revision where this filectx was introduced"""
789 794 introrev = self.introrev()
790 795 if self.rev() == introrev:
791 796 return self
792 797 return self.filectx(self.filenode(), changeid=introrev)
793 798
794 799 def _parentfilectx(self, path, fileid, filelog):
795 800 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
796 801 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
797 802 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
798 803 # If self is associated with a changeset (probably explicitly
799 804 # fed), ensure the created filectx is associated with a
800 805 # changeset that is an ancestor of self.changectx.
801 806 # This lets us later use _adjustlinkrev to get a correct link.
802 807 fctx._descendantrev = self.rev()
803 808 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
804 809 elif r'_descendantrev' in vars(self):
805 810 # Otherwise propagate _descendantrev if we have one associated.
806 811 fctx._descendantrev = self._descendantrev
807 812 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
808 813 return fctx
809 814
810 815 def parents(self):
811 816 _path = self._path
812 817 fl = self._filelog
813 818 parents = self._filelog.parents(self._filenode)
814 819 pl = [(_path, node, fl) for node in parents if node != nullid]
815 820
816 821 r = fl.renamed(self._filenode)
817 822 if r:
818 823 # - In the simple rename case, both parents are nullid, pl is empty.
819 824 # - In case of merge, only one of the parents is nullid and should
820 825 # be replaced with the rename information. This parent is -always-
821 826 # the first one.
822 827 #
823 828 # As nullid parents have always been filtered out by the previous list
824 829 # comprehension, inserting at 0 will always result in replacing the
825 830 # first nullid parent with the rename information.
826 831 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
827 832
828 833 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
829 834
830 835 def p1(self):
831 836 return self.parents()[0]
832 837
833 838 def p2(self):
834 839 p = self.parents()
835 840 if len(p) == 2:
836 841 return p[1]
837 842 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
838 843
839 844 def annotate(self, follow=False, skiprevs=None, diffopts=None):
840 845 """Returns a list of annotateline objects for each line in the file
841 846
842 847 - line.fctx is the filectx of the node where that line was last changed
843 848 - line.lineno is the line number at the first appearance in the managed
844 849 file
845 850 - line.text is the data on that line (including newline character)
846 851 """
847 852 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
848 853
849 854 def parents(f):
850 855 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
851 856 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
852 857 # from the topmost introrev (= srcrev) down to p.linkrev() if it
853 858 # isn't an ancestor of the srcrev.
854 859 f._changeid
855 860 pl = f.parents()
856 861
857 862 # Don't return renamed parents if we aren't following.
858 863 if not follow:
859 864 pl = [p for p in pl if p.path() == f.path()]
860 865
861 866 # renamed filectx won't have a filelog yet, so set it
862 867 # from the cache to save time
863 868 for p in pl:
864 869 if not r'_filelog' in p.__dict__:
865 870 p._filelog = getlog(p.path())
866 871
867 872 return pl
868 873
869 874 # use linkrev to find the first changeset where self appeared
870 875 base = self.introfilectx()
871 876 if getattr(base, '_ancestrycontext', None) is None:
872 877 cl = self._repo.changelog
873 878 if base.rev() is None:
874 879 # wctx is not inclusive, but works because _ancestrycontext
875 880 # is used to test filelog revisions
876 881 ac = cl.ancestors([p.rev() for p in base.parents()],
877 882 inclusive=True)
878 883 else:
879 884 ac = cl.ancestors([base.rev()], inclusive=True)
880 885 base._ancestrycontext = ac
881 886
882 887 return dagop.annotate(base, parents, skiprevs=skiprevs,
883 888 diffopts=diffopts)
884 889
885 890 def ancestors(self, followfirst=False):
886 891 visit = {}
887 892 c = self
888 893 if followfirst:
889 894 cut = 1
890 895 else:
891 896 cut = None
892 897
893 898 while True:
894 899 for parent in c.parents()[:cut]:
895 900 visit[(parent.linkrev(), parent.filenode())] = parent
896 901 if not visit:
897 902 break
898 903 c = visit.pop(max(visit))
899 904 yield c
900 905
901 906 def decodeddata(self):
902 907 """Returns `data()` after running repository decoding filters.
903 908
904 909 This is often equivalent to how the data would be expressed on disk.
905 910 """
906 911 return self._repo.wwritedata(self.path(), self.data())
907 912
908 913 class filectx(basefilectx):
909 914 """A filecontext object makes access to data related to a particular
910 915 filerevision convenient."""
911 916 def __init__(self, repo, path, changeid=None, fileid=None,
912 917 filelog=None, changectx=None):
913 918 """changeid must be a revision number, if specified.
914 919 fileid can be a file revision or node."""
915 920 self._repo = repo
916 921 self._path = path
917 922
918 923 assert (changeid is not None
919 924 or fileid is not None
920 925 or changectx is not None), \
921 926 ("bad args: changeid=%r, fileid=%r, changectx=%r"
922 927 % (changeid, fileid, changectx))
923 928
924 929 if filelog is not None:
925 930 self._filelog = filelog
926 931
927 932 if changeid is not None:
928 933 self._changeid = changeid
929 934 if changectx is not None:
930 935 self._changectx = changectx
931 936 if fileid is not None:
932 937 self._fileid = fileid
933 938
934 939 @propertycache
935 940 def _changectx(self):
936 941 try:
937 942 return self._repo[self._changeid]
938 943 except error.FilteredRepoLookupError:
939 944 # Linkrev may point to any revision in the repository. When the
940 945 # repository is filtered this may lead to `filectx` trying to build
941 946 # `changectx` for a filtered revision. In such a case we fall back to
942 947 # creating `changectx` on the unfiltered version of the repository.
943 948 # This fallback should not be an issue because `changectx` objects from
944 949 # `filectx` are not used in complex operations that care about
945 950 # filtering.
946 951 #
947 952 # This fallback is a cheap and dirty fix that prevents several
948 953 # crashes. It does not ensure the behavior is correct. However, the
949 954 # behavior was not correct before filtering either, and "incorrect
950 955 # behavior" is seen as better than a "crash".
951 956 #
952 957 # Linkrevs have several serious problems with filtering that are
953 958 # complicated to solve. Proper handling of the issue here should be
954 959 # considered when solving the linkrev issues is on the table.
955 960 return self._repo.unfiltered()[self._changeid]
956 961
957 962 def filectx(self, fileid, changeid=None):
958 963 '''opens an arbitrary revision of the file without
959 964 opening a new filelog'''
960 965 return filectx(self._repo, self._path, fileid=fileid,
961 966 filelog=self._filelog, changeid=changeid)
962 967
963 968 def rawdata(self):
964 969 return self._filelog.revision(self._filenode, raw=True)
965 970
966 971 def rawflags(self):
967 972 """low-level revlog flags"""
968 973 return self._filelog.flags(self._filerev)
969 974
970 975 def data(self):
971 976 try:
972 977 return self._filelog.read(self._filenode)
973 978 except error.CensoredNodeError:
974 979 if self._repo.ui.config("censor", "policy") == "ignore":
975 980 return ""
976 981 raise error.Abort(_("censored node: %s") % short(self._filenode),
977 982 hint=_("set censor.policy to ignore errors"))
978 983
979 984 def size(self):
980 985 return self._filelog.size(self._filerev)
981 986
982 987 @propertycache
983 988 def _copied(self):
984 989 """check if file was actually renamed in this changeset revision
985 990
986 991 If a rename is logged in the file revision, we report the copy for the changeset only
987 992 if the file revision's linkrev points back to the changeset in question
988 993 or both changeset parents contain different file revisions.
989 994 """
990 995
991 996 renamed = self._filelog.renamed(self._filenode)
992 997 if not renamed:
993 998 return None
994 999
995 1000 if self.rev() == self.linkrev():
996 1001 return renamed
997 1002
998 1003 name = self.path()
999 1004 fnode = self._filenode
1000 1005 for p in self._changectx.parents():
1001 1006 try:
1002 1007 if fnode == p.filenode(name):
1003 1008 return None
1004 1009 except error.LookupError:
1005 1010 pass
1006 1011 return renamed
1007 1012
1008 1013 def children(self):
1009 1014 # hard for renames
1010 1015 c = self._filelog.children(self._filenode)
1011 1016 return [filectx(self._repo, self._path, fileid=x,
1012 1017 filelog=self._filelog) for x in c]
1013 1018
1014 1019 class committablectx(basectx):
1015 1020 """A committablectx object provides common functionality for a context that
1016 1021 wants the ability to commit, e.g. workingctx or memctx."""
1017 1022 def __init__(self, repo, text="", user=None, date=None, extra=None,
1018 1023 changes=None):
1019 1024 super(committablectx, self).__init__(repo)
1020 1025 self._rev = None
1021 1026 self._node = None
1022 1027 self._text = text
1023 1028 if date:
1024 1029 self._date = dateutil.parsedate(date)
1025 1030 if user:
1026 1031 self._user = user
1027 1032 if changes:
1028 1033 self._status = changes
1029 1034
1030 1035 self._extra = {}
1031 1036 if extra:
1032 1037 self._extra = extra.copy()
1033 1038 if 'branch' not in self._extra:
1034 1039 try:
1035 1040 branch = encoding.fromlocal(self._repo.dirstate.branch())
1036 1041 except UnicodeDecodeError:
1037 1042 raise error.Abort(_('branch name not in UTF-8!'))
1038 1043 self._extra['branch'] = branch
1039 1044 if self._extra['branch'] == '':
1040 1045 self._extra['branch'] = 'default'
1041 1046
1042 1047 def __bytes__(self):
1043 1048 return bytes(self._parents[0]) + "+"
1044 1049
1045 1050 __str__ = encoding.strmethod(__bytes__)
1046 1051
1047 1052 def __nonzero__(self):
1048 1053 return True
1049 1054
1050 1055 __bool__ = __nonzero__
1051 1056
1052 1057 def _buildflagfunc(self):
1053 1058 # Create a fallback function for getting file flags when the
1054 1059 # filesystem doesn't support them
1055 1060
1056 1061 copiesget = self._repo.dirstate.copies().get
1057 1062 parents = self.parents()
1058 1063 if len(parents) < 2:
1059 1064 # when we have one parent, it's easy: copy from parent
1060 1065 man = parents[0].manifest()
1061 1066 def func(f):
1062 1067 f = copiesget(f, f)
1063 1068 return man.flags(f)
1064 1069 else:
1065 1070 # merges are tricky: we try to reconstruct the unstored
1066 1071 # result from the merge (issue1802)
1067 1072 p1, p2 = parents
1068 1073 pa = p1.ancestor(p2)
1069 1074 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1070 1075
1071 1076 def func(f):
1072 1077 f = copiesget(f, f) # may be wrong for merges with copies
1073 1078 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1074 1079 if fl1 == fl2:
1075 1080 return fl1
1076 1081 if fl1 == fla:
1077 1082 return fl2
1078 1083 if fl2 == fla:
1079 1084 return fl1
1080 1085 return '' # punt for conflicts
1081 1086
1082 1087 return func
1083 1088
1084 1089 @propertycache
1085 1090 def _flagfunc(self):
1086 1091 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1087 1092
1088 1093 @propertycache
1089 1094 def _status(self):
1090 1095 return self._repo.status()
1091 1096
1092 1097 @propertycache
1093 1098 def _user(self):
1094 1099 return self._repo.ui.username()
1095 1100
1096 1101 @propertycache
1097 1102 def _date(self):
1098 1103 ui = self._repo.ui
1099 1104 date = ui.configdate('devel', 'default-date')
1100 1105 if date is None:
1101 1106 date = dateutil.makedate()
1102 1107 return date
1103 1108
1104 1109 def subrev(self, subpath):
1105 1110 return None
1106 1111
1107 1112 def manifestnode(self):
1108 1113 return None
1109 1114 def user(self):
1110 1115 return self._user or self._repo.ui.username()
1111 1116 def date(self):
1112 1117 return self._date
1113 1118 def description(self):
1114 1119 return self._text
1115 1120 def files(self):
1116 1121 return sorted(self._status.modified + self._status.added +
1117 1122 self._status.removed)
1118 1123
1119 1124 def modified(self):
1120 1125 return self._status.modified
1121 1126 def added(self):
1122 1127 return self._status.added
1123 1128 def removed(self):
1124 1129 return self._status.removed
1125 1130 def deleted(self):
1126 1131 return self._status.deleted
1127 1132 def branch(self):
1128 1133 return encoding.tolocal(self._extra['branch'])
1129 1134 def closesbranch(self):
1130 1135 return 'close' in self._extra
1131 1136 def extra(self):
1132 1137 return self._extra
1133 1138
1134 1139 def isinmemory(self):
1135 1140 return False
1136 1141
1137 1142 def tags(self):
1138 1143 return []
1139 1144
1140 1145 def bookmarks(self):
1141 1146 b = []
1142 1147 for p in self.parents():
1143 1148 b.extend(p.bookmarks())
1144 1149 return b
1145 1150
1146 1151 def phase(self):
1147 1152 phase = phases.draft # default phase to draft
1148 1153 for p in self.parents():
1149 1154 phase = max(phase, p.phase())
1150 1155 return phase
1151 1156
1152 1157 def hidden(self):
1153 1158 return False
1154 1159
1155 1160 def children(self):
1156 1161 return []
1157 1162
1158 1163 def flags(self, path):
1159 1164 if r'_manifest' in self.__dict__:
1160 1165 try:
1161 1166 return self._manifest.flags(path)
1162 1167 except KeyError:
1163 1168 return ''
1164 1169
1165 1170 try:
1166 1171 return self._flagfunc(path)
1167 1172 except OSError:
1168 1173 return ''
1169 1174
1170 1175 def ancestor(self, c2):
1171 1176 """return the "best" ancestor context of self and c2"""
1172 1177 return self._parents[0].ancestor(c2) # punt on two parents for now
1173 1178
1174 1179 def walk(self, match):
1175 1180 '''Generates matching file names.'''
1176 1181 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1177 1182 subrepos=sorted(self.substate),
1178 1183 unknown=True, ignored=False))
1179 1184
1180 1185 def matches(self, match):
1181 1186 match = self._repo.narrowmatch(match)
1182 1187 ds = self._repo.dirstate
1183 1188 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1184 1189
1185 1190 def ancestors(self):
1186 1191 for p in self._parents:
1187 1192 yield p
1188 1193 for a in self._repo.changelog.ancestors(
1189 1194 [p.rev() for p in self._parents]):
1190 1195 yield self._repo[a]
1191 1196
1192 1197 def markcommitted(self, node):
1193 1198 """Perform post-commit cleanup necessary after committing this ctx
1194 1199
1195 1200 Specifically, this updates backing stores this working context
1196 1201 wraps to reflect the fact that the changes reflected by this
1197 1202 workingctx have been committed. For example, it marks
1198 1203 modified and added files as normal in the dirstate.
1199 1204
1200 1205 """
1201 1206
1202 1207 with self._repo.dirstate.parentchange():
1203 1208 for f in self.modified() + self.added():
1204 1209 self._repo.dirstate.normal(f)
1205 1210 for f in self.removed():
1206 1211 self._repo.dirstate.drop(f)
1207 1212 self._repo.dirstate.setparents(node)
1208 1213
1209 1214 # write changes out explicitly, because nesting wlock at
1210 1215 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1211 1216 # from immediately doing so for subsequent changing files
1212 1217 self._repo.dirstate.write(self._repo.currenttransaction())
1213 1218
1214 1219 def dirty(self, missing=False, merge=True, branch=True):
1215 1220 return False
1216 1221
1217 1222 class workingctx(committablectx):
1218 1223 """A workingctx object makes access to data related to
1219 1224 the current working directory convenient.
1220 1225 date - any valid date string or (unixtime, offset), or None.
1221 1226 user - username string, or None.
1222 1227 extra - a dictionary of extra values, or None.
1223 1228 changes - a list of file lists as returned by localrepo.status()
1224 1229 or None to use the repository status.
1225 1230 """
1226 1231 def __init__(self, repo, text="", user=None, date=None, extra=None,
1227 1232 changes=None):
1228 1233 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1229 1234
1230 1235 def __iter__(self):
1231 1236 d = self._repo.dirstate
1232 1237 for f in d:
1233 1238 if d[f] != 'r':
1234 1239 yield f
1235 1240
1236 1241 def __contains__(self, key):
1237 1242 return self._repo.dirstate[key] not in "?r"
1238 1243
1239 1244 def hex(self):
1240 1245 return hex(wdirid)
1241 1246
1242 1247 @propertycache
1243 1248 def _parents(self):
1244 1249 p = self._repo.dirstate.parents()
1245 1250 if p[1] == nullid:
1246 1251 p = p[:-1]
1247 1252 # use unfiltered repo to delay/avoid loading obsmarkers
1248 1253 unfi = self._repo.unfiltered()
1249 1254 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1250 1255
1251 1256 def _fileinfo(self, path):
1252 1257 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1253 1258 self._manifest
1254 1259 return super(workingctx, self)._fileinfo(path)
1255 1260
1256 1261 def filectx(self, path, filelog=None):
1257 1262 """get a file context from the working directory"""
1258 1263 return workingfilectx(self._repo, path, workingctx=self,
1259 1264 filelog=filelog)
1260 1265
1261 1266 def dirty(self, missing=False, merge=True, branch=True):
1262 1267 "check whether a working directory is modified"
1263 1268 # check subrepos first
1264 1269 for s in sorted(self.substate):
1265 1270 if self.sub(s).dirty(missing=missing):
1266 1271 return True
1267 1272 # check current working dir
1268 1273 return ((merge and self.p2()) or
1269 1274 (branch and self.branch() != self.p1().branch()) or
1270 1275 self.modified() or self.added() or self.removed() or
1271 1276 (missing and self.deleted()))
1272 1277
1273 1278 def add(self, list, prefix=""):
1274 1279 with self._repo.wlock():
1275 1280 ui, ds = self._repo.ui, self._repo.dirstate
1276 1281 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1277 1282 rejected = []
1278 1283 lstat = self._repo.wvfs.lstat
1279 1284 for f in list:
1280 1285 # ds.pathto() returns an absolute path when this is invoked from
1281 1286 # the keyword extension. That gets flagged as non-portable on
1282 1287 # Windows, since it contains the drive letter and colon.
1283 1288 scmutil.checkportable(ui, os.path.join(prefix, f))
1284 1289 try:
1285 1290 st = lstat(f)
1286 1291 except OSError:
1287 1292 ui.warn(_("%s does not exist!\n") % uipath(f))
1288 1293 rejected.append(f)
1289 1294 continue
1290 1295 limit = ui.configbytes('ui', 'large-file-limit')
1291 1296 if limit != 0 and st.st_size > limit:
1292 1297 ui.warn(_("%s: up to %d MB of RAM may be required "
1293 1298 "to manage this file\n"
1294 1299 "(use 'hg revert %s' to cancel the "
1295 1300 "pending addition)\n")
1296 1301 % (f, 3 * st.st_size // 1000000, uipath(f)))
1297 1302 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1298 1303 ui.warn(_("%s not added: only files and symlinks "
1299 1304 "supported currently\n") % uipath(f))
1300 1305 rejected.append(f)
1301 1306 elif ds[f] in 'amn':
1302 1307 ui.warn(_("%s already tracked!\n") % uipath(f))
1303 1308 elif ds[f] == 'r':
1304 1309 ds.normallookup(f)
1305 1310 else:
1306 1311 ds.add(f)
1307 1312 return rejected
1308 1313
1309 1314 def forget(self, files, prefix=""):
1310 1315 with self._repo.wlock():
1311 1316 ds = self._repo.dirstate
1312 1317 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1313 1318 rejected = []
1314 1319 for f in files:
1315 1320 if f not in self._repo.dirstate:
1316 1321 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1317 1322 rejected.append(f)
1318 1323 elif self._repo.dirstate[f] != 'a':
1319 1324 self._repo.dirstate.remove(f)
1320 1325 else:
1321 1326 self._repo.dirstate.drop(f)
1322 1327 return rejected
1323 1328
1324 1329 def undelete(self, list):
1325 1330 pctxs = self.parents()
1326 1331 with self._repo.wlock():
1327 1332 ds = self._repo.dirstate
1328 1333 for f in list:
1329 1334 if self._repo.dirstate[f] != 'r':
1330 1335 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1331 1336 else:
1332 1337 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1333 1338 t = fctx.data()
1334 1339 self._repo.wwrite(f, t, fctx.flags())
1335 1340 self._repo.dirstate.normal(f)
1336 1341
1337 1342 def copy(self, source, dest):
1338 1343 try:
1339 1344 st = self._repo.wvfs.lstat(dest)
1340 1345 except OSError as err:
1341 1346 if err.errno != errno.ENOENT:
1342 1347 raise
1343 1348 self._repo.ui.warn(_("%s does not exist!\n")
1344 1349 % self._repo.dirstate.pathto(dest))
1345 1350 return
1346 1351 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1347 1352 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1348 1353 "symbolic link\n")
1349 1354 % self._repo.dirstate.pathto(dest))
1350 1355 else:
1351 1356 with self._repo.wlock():
1352 1357 if self._repo.dirstate[dest] in '?':
1353 1358 self._repo.dirstate.add(dest)
1354 1359 elif self._repo.dirstate[dest] in 'r':
1355 1360 self._repo.dirstate.normallookup(dest)
1356 1361 self._repo.dirstate.copy(source, dest)
1357 1362
1358 1363 def match(self, pats=None, include=None, exclude=None, default='glob',
1359 1364 listsubrepos=False, badfn=None):
1360 1365 r = self._repo
1361 1366
1362 1367 # Only a case insensitive filesystem needs magic to translate user input
1363 1368 # to actual case in the filesystem.
1364 1369 icasefs = not util.fscasesensitive(r.root)
1365 1370 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1366 1371 default, auditor=r.auditor, ctx=self,
1367 1372 listsubrepos=listsubrepos, badfn=badfn,
1368 1373 icasefs=icasefs)
1369 1374
1370 1375 def _filtersuspectsymlink(self, files):
1371 1376 if not files or self._repo.dirstate._checklink:
1372 1377 return files
1373 1378
1374 1379 # Symlink placeholders may get non-symlink-like contents
1375 1380 # via user error or dereferencing by NFS or Samba servers,
1376 1381 # so we filter out any placeholders that don't look like a
1377 1382 # symlink
1378 1383 sane = []
1379 1384 for f in files:
1380 1385 if self.flags(f) == 'l':
1381 1386 d = self[f].data()
1382 1387 if (d == '' or len(d) >= 1024 or '\n' in d
1383 1388 or stringutil.binary(d)):
1384 1389 self._repo.ui.debug('ignoring suspect symlink placeholder'
1385 1390 ' "%s"\n' % f)
1386 1391 continue
1387 1392 sane.append(f)
1388 1393 return sane
1389 1394
1390 1395 def _checklookup(self, files):
1391 1396 # check for any possibly clean files
1392 1397 if not files:
1393 1398 return [], [], []
1394 1399
1395 1400 modified = []
1396 1401 deleted = []
1397 1402 fixup = []
1398 1403 pctx = self._parents[0]
1399 1404 # do a full compare of any files that might have changed
1400 1405 for f in sorted(files):
1401 1406 try:
1402 1407 # This will return True for a file that got replaced by a
1403 1408 # directory in the interim, but fixing that is pretty hard.
1404 1409 if (f not in pctx or self.flags(f) != pctx.flags(f)
1405 1410 or pctx[f].cmp(self[f])):
1406 1411 modified.append(f)
1407 1412 else:
1408 1413 fixup.append(f)
1409 1414 except (IOError, OSError):
1410 1415 # A file became inaccessible in between? Mark it as deleted,
1411 1416 # matching dirstate behavior (issue5584).
1412 1417 # The dirstate has more complex behavior around whether a
1413 1418 # missing file matches a directory, etc, but we don't need to
1414 1419 # bother with that: if f has made it to this point, we're sure
1415 1420 # it's in the dirstate.
1416 1421 deleted.append(f)
1417 1422
1418 1423 return modified, deleted, fixup
1419 1424
1420 1425 def _poststatusfixup(self, status, fixup):
1421 1426 """update dirstate for files that are actually clean"""
1422 1427 poststatus = self._repo.postdsstatus()
1423 1428 if fixup or poststatus:
1424 1429 try:
1425 1430 oldid = self._repo.dirstate.identity()
1426 1431
1427 1432 # updating the dirstate is optional
1428 1433 # so we don't wait on the lock
1429 1434 # wlock can invalidate the dirstate, so cache normal _after_
1430 1435 # taking the lock
1431 1436 with self._repo.wlock(False):
1432 1437 if self._repo.dirstate.identity() == oldid:
1433 1438 if fixup:
1434 1439 normal = self._repo.dirstate.normal
1435 1440 for f in fixup:
1436 1441 normal(f)
1437 1442 # write changes out explicitly, because nesting
1438 1443 # wlock at runtime may prevent 'wlock.release()'
1439 1444 # after this block from doing so for subsequent
1440 1445 # changing files
1441 1446 tr = self._repo.currenttransaction()
1442 1447 self._repo.dirstate.write(tr)
1443 1448
1444 1449 if poststatus:
1445 1450 for ps in poststatus:
1446 1451 ps(self, status)
1447 1452 else:
1448 1453 # in this case, writing changes out breaks
1449 1454 # consistency, because .hg/dirstate was
1450 1455 # already changed simultaneously after last
1451 1456 # caching (see also issue5584 for detail)
1452 1457 self._repo.ui.debug('skip updating dirstate: '
1453 1458 'identity mismatch\n')
1454 1459 except error.LockError:
1455 1460 pass
1456 1461 finally:
1457 1462 # Even if the wlock couldn't be grabbed, clear out the list.
1458 1463 self._repo.clearpostdsstatus()
1459 1464
1460 1465 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1461 1466 '''Gets the status from the dirstate -- internal use only.'''
1462 1467 subrepos = []
1463 1468 if '.hgsub' in self:
1464 1469 subrepos = sorted(self.substate)
1465 1470 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1466 1471 clean=clean, unknown=unknown)
1467 1472
1468 1473 # check for any possibly clean files
1469 1474 fixup = []
1470 1475 if cmp:
1471 1476 modified2, deleted2, fixup = self._checklookup(cmp)
1472 1477 s.modified.extend(modified2)
1473 1478 s.deleted.extend(deleted2)
1474 1479
1475 1480 if fixup and clean:
1476 1481 s.clean.extend(fixup)
1477 1482
1478 1483 self._poststatusfixup(s, fixup)
1479 1484
1480 1485 if match.always():
1481 1486 # cache for performance
1482 1487 if s.unknown or s.ignored or s.clean:
1483 1488 # "_status" is cached with list*=False in the normal route
1484 1489 self._status = scmutil.status(s.modified, s.added, s.removed,
1485 1490 s.deleted, [], [], [])
1486 1491 else:
1487 1492 self._status = s
1488 1493
1489 1494 return s
1490 1495
1491 1496 @propertycache
1492 1497 def _manifest(self):
1493 1498 """generate a manifest corresponding to the values in self._status
1494 1499
1495 1500 This reuses the file nodeids from the parent, but uses special node
1496 1501 identifiers for added and modified files. This is used by manifest
1497 1502 merge to see that files are different and by the update logic to avoid
1498 1503 deleting newly added files.
1499 1504 """
1500 1505 return self._buildstatusmanifest(self._status)
1501 1506
1502 1507 def _buildstatusmanifest(self, status):
1503 1508 """Builds a manifest that includes the given status results."""
1504 1509 parents = self.parents()
1505 1510
1506 1511 man = parents[0].manifest().copy()
1507 1512
1508 1513 ff = self._flagfunc
1509 1514 for i, l in ((addednodeid, status.added),
1510 1515 (modifiednodeid, status.modified)):
1511 1516 for f in l:
1512 1517 man[f] = i
1513 1518 try:
1514 1519 man.setflag(f, ff(f))
1515 1520 except OSError:
1516 1521 pass
1517 1522
1518 1523 for f in status.deleted + status.removed:
1519 1524 if f in man:
1520 1525 del man[f]
1521 1526
1522 1527 return man
1523 1528
1524 1529 def _buildstatus(self, other, s, match, listignored, listclean,
1525 1530 listunknown):
1526 1531 """build a status with respect to another context
1527 1532
1528 1533 This includes logic for maintaining the fast path of status when
1529 1534 comparing the working directory against its parent, which is to skip
1530 1535 building a new manifest if self (working directory) is not comparing
1531 1536 against its parent (repo['.']).
1532 1537 """
1533 1538 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1534 1539 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1535 1540 # might have accidentally ended up with the entire contents of the file
1536 1541 # they are supposed to be linking to.
1537 1542 s.modified[:] = self._filtersuspectsymlink(s.modified)
1538 1543 if other != self._repo['.']:
1539 1544 s = super(workingctx, self)._buildstatus(other, s, match,
1540 1545 listignored, listclean,
1541 1546 listunknown)
1542 1547 return s
1543 1548
1544 1549 def _matchstatus(self, other, match):
1545 1550 """override the match method with a filter for directory patterns
1546 1551
1547 1552 We use inheritance to customize the match.bad method only in cases of
1548 1553 workingctx since it belongs only to the working directory when
1549 1554 comparing against the parent changeset.
1550 1555
1551 1556 If we aren't comparing against the working directory's parent, then we
1552 1557 just use the default match object sent to us.
1553 1558 """
1554 1559 if other != self._repo['.']:
1555 1560 def bad(f, msg):
1556 1561 # 'f' may be a directory pattern from 'match.files()',
1557 1562 # so 'f not in ctx1' is not enough
1558 1563 if f not in other and not other.hasdir(f):
1559 1564 self._repo.ui.warn('%s: %s\n' %
1560 1565 (self._repo.dirstate.pathto(f), msg))
1561 1566 match.bad = bad
1562 1567 return match
1563 1568
1564 1569 def markcommitted(self, node):
1565 1570 super(workingctx, self).markcommitted(node)
1566 1571
1567 1572 sparse.aftercommit(self._repo, node)
1568 1573
1569 1574 class committablefilectx(basefilectx):
1570 1575 """A committablefilectx provides common functionality for a file context
1571 1576 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1572 1577 def __init__(self, repo, path, filelog=None, ctx=None):
1573 1578 self._repo = repo
1574 1579 self._path = path
1575 1580 self._changeid = None
1576 1581 self._filerev = self._filenode = None
1577 1582
1578 1583 if filelog is not None:
1579 1584 self._filelog = filelog
1580 1585 if ctx:
1581 1586 self._changectx = ctx
1582 1587
1583 1588 def __nonzero__(self):
1584 1589 return True
1585 1590
1586 1591 __bool__ = __nonzero__
1587 1592
1588 1593 def linkrev(self):
1589 1594 # linked to self._changectx no matter if file is modified or not
1590 1595 return self.rev()
1591 1596
1592 1597 def parents(self):
1593 1598 '''return parent filectxs, following copies if necessary'''
1594 1599 def filenode(ctx, path):
1595 1600 return ctx._manifest.get(path, nullid)
1596 1601
1597 1602 path = self._path
1598 1603 fl = self._filelog
1599 1604 pcl = self._changectx._parents
1600 1605 renamed = self.renamed()
1601 1606
1602 1607 if renamed:
1603 1608 pl = [renamed + (None,)]
1604 1609 else:
1605 1610 pl = [(path, filenode(pcl[0], path), fl)]
1606 1611
1607 1612 for pc in pcl[1:]:
1608 1613 pl.append((path, filenode(pc, path), fl))
1609 1614
1610 1615 return [self._parentfilectx(p, fileid=n, filelog=l)
1611 1616 for p, n, l in pl if n != nullid]
1612 1617
1613 1618 def children(self):
1614 1619 return []
1615 1620
1616 1621 class workingfilectx(committablefilectx):
1617 1622 """A workingfilectx object makes access to data related to a particular
1618 1623 file in the working directory convenient."""
1619 1624 def __init__(self, repo, path, filelog=None, workingctx=None):
1620 1625 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1621 1626
1622 1627 @propertycache
1623 1628 def _changectx(self):
1624 1629 return workingctx(self._repo)
1625 1630
1626 1631 def data(self):
1627 1632 return self._repo.wread(self._path)
1628 1633 def renamed(self):
1629 1634 rp = self._repo.dirstate.copied(self._path)
1630 1635 if not rp:
1631 1636 return None
1632 1637 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1633 1638
1634 1639 def size(self):
1635 1640 return self._repo.wvfs.lstat(self._path).st_size
1636 1641 def date(self):
1637 1642 t, tz = self._changectx.date()
1638 1643 try:
1639 1644 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1640 1645 except OSError as err:
1641 1646 if err.errno != errno.ENOENT:
1642 1647 raise
1643 1648 return (t, tz)
1644 1649
1645 1650 def exists(self):
1646 1651 return self._repo.wvfs.exists(self._path)
1647 1652
1648 1653 def lexists(self):
1649 1654 return self._repo.wvfs.lexists(self._path)
1650 1655
1651 1656 def audit(self):
1652 1657 return self._repo.wvfs.audit(self._path)
1653 1658
1654 1659 def cmp(self, fctx):
1655 1660 """compare with other file context
1656 1661
1657 1662 returns True if different than fctx.
1658 1663 """
1659 1664 # fctx should be a filectx (not a workingfilectx)
1660 1665 # invert comparison to reuse the same code path
1661 1666 return fctx.cmp(self)
1662 1667
1663 1668 def remove(self, ignoremissing=False):
1664 1669 """wraps unlink for a repo's working directory"""
1665 1670 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1666 1671 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1667 1672 rmdir=rmdir)
1668 1673
1669 1674 def write(self, data, flags, backgroundclose=False, **kwargs):
1670 1675 """wraps repo.wwrite"""
1671 1676 self._repo.wwrite(self._path, data, flags,
1672 1677 backgroundclose=backgroundclose,
1673 1678 **kwargs)
1674 1679
1675 1680 def markcopied(self, src):
1676 1681 """marks this file a copy of `src`"""
1677 1682 if self._repo.dirstate[self._path] in "nma":
1678 1683 self._repo.dirstate.copy(src, self._path)
1679 1684
1680 1685 def clearunknown(self):
1681 1686 """Removes conflicting items in the working directory so that
1682 1687 ``write()`` can be called successfully.
1683 1688 """
1684 1689 wvfs = self._repo.wvfs
1685 1690 f = self._path
1686 1691 wvfs.audit(f)
1687 1692 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1688 1693 # remove files under the directory as they should already be
1689 1694 # warned and backed up
1690 1695 if wvfs.isdir(f) and not wvfs.islink(f):
1691 1696 wvfs.rmtree(f, forcibly=True)
1692 1697 for p in reversed(list(util.finddirs(f))):
1693 1698 if wvfs.isfileorlink(p):
1694 1699 wvfs.unlink(p)
1695 1700 break
1696 1701 else:
1697 1702 # don't remove files if path conflicts are not processed
1698 1703 if wvfs.isdir(f) and not wvfs.islink(f):
1699 1704 wvfs.removedirs(f)
1700 1705
1701 1706 def setflags(self, l, x):
1702 1707 self._repo.wvfs.setflags(self._path, l, x)
1703 1708
1704 1709 class overlayworkingctx(committablectx):
1705 1710 """Wraps another mutable context with a write-back cache that can be
1706 1711 converted into a commit context.
1707 1712
1708 1713 self._cache[path] maps to a dict with keys: {
1709 1714 'exists': bool?
1710 1715 'date': date?
1711 1716 'data': str?
1712 1717 'flags': str?
1713 1718 'copied': str? (path or None)
1714 1719 }
1715 1720 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1716 1721 is `False`, the file was deleted.
1717 1722 """
1718 1723
1719 1724 def __init__(self, repo):
1720 1725 super(overlayworkingctx, self).__init__(repo)
1721 1726 self.clean()
1722 1727
1723 1728 def setbase(self, wrappedctx):
1724 1729 self._wrappedctx = wrappedctx
1725 1730 self._parents = [wrappedctx]
1726 1731 # Drop old manifest cache as it is now out of date.
1727 1732 # This is necessary when, e.g., rebasing several nodes with one
1728 1733 # ``overlayworkingctx`` (e.g. with --collapse).
1729 1734 util.clearcachedproperty(self, '_manifest')
1730 1735
1731 1736 def data(self, path):
1732 1737 if self.isdirty(path):
1733 1738 if self._cache[path]['exists']:
1734 1739 if self._cache[path]['data']:
1735 1740 return self._cache[path]['data']
1736 1741 else:
1737 1742 # Must fall back here, too, because we only set flags.
1738 1743 return self._wrappedctx[path].data()
1739 1744 else:
1740 1745 raise error.ProgrammingError("No such file or directory: %s" %
1741 1746 path)
1742 1747 else:
1743 1748 return self._wrappedctx[path].data()
1744 1749
1745 1750 @propertycache
1746 1751 def _manifest(self):
1747 1752 parents = self.parents()
1748 1753 man = parents[0].manifest().copy()
1749 1754
1750 1755 flag = self._flagfunc
1751 1756 for path in self.added():
1752 1757 man[path] = addednodeid
1753 1758 man.setflag(path, flag(path))
1754 1759 for path in self.modified():
1755 1760 man[path] = modifiednodeid
1756 1761 man.setflag(path, flag(path))
1757 1762 for path in self.removed():
1758 1763 del man[path]
1759 1764 return man
1760 1765
1761 1766 @propertycache
1762 1767 def _flagfunc(self):
1763 1768 def f(path):
1764 1769 return self._cache[path]['flags']
1765 1770 return f
1766 1771
1767 1772 def files(self):
1768 1773 return sorted(self.added() + self.modified() + self.removed())
1769 1774
1770 1775 def modified(self):
1771 1776 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1772 1777 self._existsinparent(f)]
1773 1778
1774 1779 def added(self):
1775 1780 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1776 1781 not self._existsinparent(f)]
1777 1782
1778 1783 def removed(self):
1779 1784 return [f for f in self._cache.keys() if
1780 1785 not self._cache[f]['exists'] and self._existsinparent(f)]
1781 1786
1782 1787 def isinmemory(self):
1783 1788 return True
1784 1789
1785 1790 def filedate(self, path):
1786 1791 if self.isdirty(path):
1787 1792 return self._cache[path]['date']
1788 1793 else:
1789 1794 return self._wrappedctx[path].date()
1790 1795
1791 1796 def markcopied(self, path, origin):
1792 1797 if self.isdirty(path):
1793 1798 self._cache[path]['copied'] = origin
1794 1799 else:
1795 1800 raise error.ProgrammingError('markcopied() called on clean context')
1796 1801
1797 1802 def copydata(self, path):
1798 1803 if self.isdirty(path):
1799 1804 return self._cache[path]['copied']
1800 1805 else:
1801 1806 raise error.ProgrammingError('copydata() called on clean context')
1802 1807
1803 1808 def flags(self, path):
1804 1809 if self.isdirty(path):
1805 1810 if self._cache[path]['exists']:
1806 1811 return self._cache[path]['flags']
1807 1812 else:
1808 1813 raise error.ProgrammingError("No such file or directory: %s" %
1809 1814 path)
1810 1815 else:
1811 1816 return self._wrappedctx[path].flags()
1812 1817
1813 1818 def _existsinparent(self, path):
1814 1819 try:
1815 1820 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1816 1821 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1817 1822 # with an ``exists()`` function.
1818 1823 self._wrappedctx[path]
1819 1824 return True
1820 1825 except error.ManifestLookupError:
1821 1826 return False
1822 1827
1823 1828 def _auditconflicts(self, path):
1824 1829 """Replicates conflict checks done by wvfs.write().
1825 1830
1826 1831 Since we never write to the filesystem and never call `applyupdates` in
1827 1832 IMM, we'll never check that a path is actually writable -- e.g., because
1828 1833 it adds `a/foo`, but `a` is actually a file in the other commit.
1829 1834 """
1830 1835 def fail(path, component):
1831 1836 # p1() is the base and we're receiving "writes" for p2()'s
1832 1837 # files.
1833 1838 if 'l' in self.p1()[component].flags():
1834 1839 raise error.Abort("error: %s conflicts with symlink %s "
1835 1840 "in %s." % (path, component,
1836 1841 self.p1().rev()))
1837 1842 else:
1838 1843 raise error.Abort("error: '%s' conflicts with file '%s' in "
1839 1844 "%s." % (path, component,
1840 1845 self.p1().rev()))
1841 1846
1842 1847 # Test that each new directory to be created to write this path from p2
1843 1848 # is not a file in p1.
1844 1849 components = path.split('/')
1845 1850 for i in pycompat.xrange(len(components)):
1846 1851 component = "/".join(components[0:i])
1847 1852 if component in self.p1() and self._cache[component]['exists']:
1848 1853 fail(path, component)
1849 1854
1850 1855 # Test the other direction -- that this path from p2 isn't a directory
1851 1856 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1852 1857 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1853 1858 matches = self.p1().manifest().matches(match)
1854 1859 mfiles = matches.keys()
1855 1860 if len(mfiles) > 0:
1856 1861 if len(mfiles) == 1 and mfiles[0] == path:
1857 1862 return
1858 1863 # omit the files which are deleted in current IMM wctx
1859 1864 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1860 1865 if not mfiles:
1861 1866 return
1862 1867 raise error.Abort("error: file '%s' cannot be written because "
1863 1868 " '%s/' is a folder in %s (containing %d "
1864 1869 "entries: %s)"
1865 1870 % (path, path, self.p1(), len(mfiles),
1866 1871 ', '.join(mfiles)))
1867 1872
1868 1873 def write(self, path, data, flags='', **kwargs):
1869 1874 if data is None:
1870 1875 raise error.ProgrammingError("data must be non-None")
1871 1876 self._auditconflicts(path)
1872 1877 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1873 1878 flags=flags)
1874 1879
1875 1880 def setflags(self, path, l, x):
1876 1881 flag = ''
1877 1882 if l:
1878 1883 flag = 'l'
1879 1884 elif x:
1880 1885 flag = 'x'
1881 1886 self._markdirty(path, exists=True, date=dateutil.makedate(),
1882 1887 flags=flag)
1883 1888
1884 1889 def remove(self, path):
1885 1890 self._markdirty(path, exists=False)
1886 1891
1887 1892 def exists(self, path):
1888 1893 """exists behaves like `lexists`, but needs to follow symlinks and
1889 1894 return False if they are broken.
1890 1895 """
1891 1896 if self.isdirty(path):
1892 1897 # If this path exists and is a symlink, "follow" it by calling
1893 1898 # exists on the destination path.
1894 1899 if (self._cache[path]['exists'] and
1895 1900 'l' in self._cache[path]['flags']):
1896 1901 return self.exists(self._cache[path]['data'].strip())
1897 1902 else:
1898 1903 return self._cache[path]['exists']
1899 1904
1900 1905 return self._existsinparent(path)
1901 1906
1902 1907 def lexists(self, path):
1903 1908 """lexists returns True if the path exists"""
1904 1909 if self.isdirty(path):
1905 1910 return self._cache[path]['exists']
1906 1911
1907 1912 return self._existsinparent(path)
1908 1913
1909 1914 def size(self, path):
1910 1915 if self.isdirty(path):
1911 1916 if self._cache[path]['exists']:
1912 1917 return len(self._cache[path]['data'])
1913 1918 else:
1914 1919 raise error.ProgrammingError("No such file or directory: %s" %
1915 1920 path)
1916 1921 return self._wrappedctx[path].size()
1917 1922
1918 1923 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1919 1924 user=None, editor=None):
1920 1925 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1921 1926 committed.
1922 1927
1923 1928 ``text`` is the commit message.
1924 1929 ``parents`` (optional) are rev numbers.
1925 1930 """
1926 1931 # Default parents to the wrapped context's if not passed.
1927 1932 if parents is None:
1928 1933 parents = self._wrappedctx.parents()
1929 1934 if len(parents) == 1:
1930 1935 parents = (parents[0], None)
1931 1936
1932 1937 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1933 1938 if parents[1] is None:
1934 1939 parents = (self._repo[parents[0]], None)
1935 1940 else:
1936 1941 parents = (self._repo[parents[0]], self._repo[parents[1]])
1937 1942
1938 1943 files = self._cache.keys()
1939 1944 def getfile(repo, memctx, path):
1940 1945 if self._cache[path]['exists']:
1941 1946 return memfilectx(repo, memctx, path,
1942 1947 self._cache[path]['data'],
1943 1948 'l' in self._cache[path]['flags'],
1944 1949 'x' in self._cache[path]['flags'],
1945 1950 self._cache[path]['copied'])
1946 1951 else:
1947 1952 # Returning None, but including the path in `files`, is
1948 1953 # necessary for memctx to register a deletion.
1949 1954 return None
1950 1955 return memctx(self._repo, parents, text, files, getfile, date=date,
1951 1956 extra=extra, user=user, branch=branch, editor=editor)
1952 1957
1953 1958 def isdirty(self, path):
1954 1959 return path in self._cache
1955 1960
1956 1961 def isempty(self):
1957 1962 # We need to discard any keys that are actually clean before the empty
1958 1963 # commit check.
1959 1964 self._compact()
1960 1965 return len(self._cache) == 0
1961 1966
1962 1967 def clean(self):
1963 1968 self._cache = {}
1964 1969
1965 1970 def _compact(self):
1966 1971 """Removes keys from the cache that are actually clean, by comparing
1967 1972 them with the underlying context.
1968 1973
1969 1974 This can occur during the merge process, e.g. by passing --tool :local
1970 1975 to resolve a conflict.
1971 1976 """
1972 1977 keys = []
1973 1978 for path in self._cache.keys():
1974 1979 cache = self._cache[path]
1975 1980 try:
1976 1981 underlying = self._wrappedctx[path]
1977 1982 if (underlying.data() == cache['data'] and
1978 1983 underlying.flags() == cache['flags']):
1979 1984 keys.append(path)
1980 1985 except error.ManifestLookupError:
1981 1986 # Path not in the underlying manifest (created).
1982 1987 continue
1983 1988
1984 1989 for path in keys:
1985 1990 del self._cache[path]
1986 1991 return keys
1987 1992
1988 1993 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1989 1994 # data not provided, let's see if we already have some; if not, let's
1990 1995 # grab it from our underlying context, so that we always have data if
1991 1996 # the file is marked as existing.
1992 1997 if exists and data is None:
1993 1998 oldentry = self._cache.get(path) or {}
1994 1999 data = oldentry.get('data') or self._wrappedctx[path].data()
1995 2000
1996 2001 self._cache[path] = {
1997 2002 'exists': exists,
1998 2003 'data': data,
1999 2004 'date': date,
2000 2005 'flags': flags,
2001 2006 'copied': None,
2002 2007 }
2003 2008
2004 2009 def filectx(self, path, filelog=None):
2005 2010 return overlayworkingfilectx(self._repo, path, parent=self,
2006 2011 filelog=filelog)
2007 2012
2008 2013 class overlayworkingfilectx(committablefilectx):
2009 2014 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2010 2015 cache, which can be flushed through later by calling ``flush()``."""
2011 2016
2012 2017 def __init__(self, repo, path, filelog=None, parent=None):
2013 2018 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2014 2019 parent)
2015 2020 self._repo = repo
2016 2021 self._parent = parent
2017 2022 self._path = path
2018 2023
2019 2024 def cmp(self, fctx):
2020 2025 return self.data() != fctx.data()
2021 2026
2022 2027 def changectx(self):
2023 2028 return self._parent
2024 2029
2025 2030 def data(self):
2026 2031 return self._parent.data(self._path)
2027 2032
2028 2033 def date(self):
2029 2034 return self._parent.filedate(self._path)
2030 2035
2031 2036 def exists(self):
2032 2037 return self.lexists()
2033 2038
2034 2039 def lexists(self):
2035 2040 return self._parent.exists(self._path)
2036 2041
2037 2042 def renamed(self):
2038 2043 path = self._parent.copydata(self._path)
2039 2044 if not path:
2040 2045 return None
2041 2046 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2042 2047
2043 2048 def size(self):
2044 2049 return self._parent.size(self._path)
2045 2050
2046 2051 def markcopied(self, origin):
2047 2052 self._parent.markcopied(self._path, origin)
2048 2053
2049 2054 def audit(self):
2050 2055 pass
2051 2056
2052 2057 def flags(self):
2053 2058 return self._parent.flags(self._path)
2054 2059
2055 2060 def setflags(self, islink, isexec):
2056 2061 return self._parent.setflags(self._path, islink, isexec)
2057 2062
2058 2063 def write(self, data, flags, backgroundclose=False, **kwargs):
2059 2064 return self._parent.write(self._path, data, flags, **kwargs)
2060 2065
2061 2066 def remove(self, ignoremissing=False):
2062 2067 return self._parent.remove(self._path)
2063 2068
2064 2069 def clearunknown(self):
2065 2070 pass
2066 2071
2067 2072 class workingcommitctx(workingctx):
2068 2073 """A workingcommitctx object makes access to data related to
2069 2074 the revision being committed convenient.
2070 2075
2071 2076 This hides changes in the working directory, if they aren't
2072 2077 committed in this context.
2073 2078 """
2074 2079 def __init__(self, repo, changes,
2075 2080 text="", user=None, date=None, extra=None):
2076 2081 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2077 2082 changes)
2078 2083
2079 2084 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2080 2085 """Return matched files only in ``self._status``
2081 2086
2082 2087 Uncommitted files appear "clean" via this context, even if
2083 2088 they aren't actually so in the working directory.
2084 2089 """
2085 2090 if clean:
2086 2091 clean = [f for f in self._manifest if f not in self._changedset]
2087 2092 else:
2088 2093 clean = []
2089 2094 return scmutil.status([f for f in self._status.modified if match(f)],
2090 2095 [f for f in self._status.added if match(f)],
2091 2096 [f for f in self._status.removed if match(f)],
2092 2097 [], [], [], clean)
2093 2098
2094 2099 @propertycache
2095 2100 def _changedset(self):
2096 2101 """Return the set of files changed in this context
2097 2102 """
2098 2103 changed = set(self._status.modified)
2099 2104 changed.update(self._status.added)
2100 2105 changed.update(self._status.removed)
2101 2106 return changed
2102 2107
2103 2108 def makecachingfilectxfn(func):
2104 2109 """Create a filectxfn that caches based on the path.
2105 2110
2106 2111 We can't use util.cachefunc because it uses all arguments as the cache
2107 2112 key and this creates a cycle since the arguments include the repo and
2108 2113 memctx.
2109 2114 """
2110 2115 cache = {}
2111 2116
2112 2117 def getfilectx(repo, memctx, path):
2113 2118 if path not in cache:
2114 2119 cache[path] = func(repo, memctx, path)
2115 2120 return cache[path]
2116 2121
2117 2122 return getfilectx
2118 2123
2119 2124 def memfilefromctx(ctx):
2120 2125 """Given a context return a memfilectx for ctx[path]
2121 2126
2122 2127 This is a convenience method for building a memctx based on another
2123 2128 context.
2124 2129 """
2125 2130 def getfilectx(repo, memctx, path):
2126 2131 fctx = ctx[path]
2127 2132 # this is weird but apparently we only keep track of one parent
2128 2133 # (why not only store that instead of a tuple?)
2129 2134 copied = fctx.renamed()
2130 2135 if copied:
2131 2136 copied = copied[0]
2132 2137 return memfilectx(repo, memctx, path, fctx.data(),
2133 2138 islink=fctx.islink(), isexec=fctx.isexec(),
2134 2139 copied=copied)
2135 2140
2136 2141 return getfilectx
2137 2142
2138 2143 def memfilefrompatch(patchstore):
2139 2144 """Given a patch (e.g. patchstore object) return a memfilectx
2140 2145
2141 2146 This is a convenience method for building a memctx based on a patchstore.
2142 2147 """
2143 2148 def getfilectx(repo, memctx, path):
2144 2149 data, mode, copied = patchstore.getfile(path)
2145 2150 if data is None:
2146 2151 return None
2147 2152 islink, isexec = mode
2148 2153 return memfilectx(repo, memctx, path, data, islink=islink,
2149 2154 isexec=isexec, copied=copied)
2150 2155
2151 2156 return getfilectx
2152 2157
2153 2158 class memctx(committablectx):
2154 2159 """Use memctx to perform in-memory commits via localrepo.commitctx().
2155 2160
2156 2161 Revision information is supplied at initialization time, while
2157 2162 related file data is made available through a callback
2158 2163 mechanism. 'repo' is the current localrepo, 'parents' is a
2159 2164 sequence of two parent revision identifiers (pass None for every
2160 2165 missing parent), 'text' is the commit message and 'files' lists
2161 2166 names of files touched by the revision (normalized and relative to
2162 2167 repository root).
2163 2168
2164 2169 filectxfn(repo, memctx, path) is a callable receiving the
2165 2170 repository, the current memctx object and the normalized path of
2166 2171 requested file, relative to repository root. It is fired by the
2167 2172 commit function for every file in 'files', but call order is
2168 2173 undefined. If the file is available in the revision being
2169 2174 committed (updated or added), filectxfn returns a memfilectx
2170 2175 object. If the file was removed, filectxfn returns None for recent
2171 2176 Mercurial. Moved files are represented by marking the source file
2172 2177 removed and the new file added with copy information (see
2173 2178 memfilectx).
2174 2179
2175 2180 user receives the committer name and defaults to current
2176 2181 repository username, date is the commit date in any format
2177 2182 supported by dateutil.parsedate() and defaults to current date, extra
2178 2183 is a dictionary of metadata or is left empty.
2179 2184 """
2180 2185
2181 2186 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2182 2187 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2183 2188 # this field to determine what to do in filectxfn.
2184 2189 _returnnoneformissingfiles = True
2185 2190
2186 2191 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2187 2192 date=None, extra=None, branch=None, editor=False):
2188 2193 super(memctx, self).__init__(repo, text, user, date, extra)
2189 2194 self._rev = None
2190 2195 self._node = None
2191 2196 parents = [(p or nullid) for p in parents]
2192 2197 p1, p2 = parents
2193 2198 self._parents = [self._repo[p] for p in (p1, p2)]
2194 2199 files = sorted(set(files))
2195 2200 self._files = files
2196 2201 if branch is not None:
2197 2202 self._extra['branch'] = encoding.fromlocal(branch)
2198 2203 self.substate = {}
2199 2204
2200 2205 if isinstance(filectxfn, patch.filestore):
2201 2206 filectxfn = memfilefrompatch(filectxfn)
2202 2207 elif not callable(filectxfn):
2203 2208 # if store is not callable, wrap it in a function
2204 2209 filectxfn = memfilefromctx(filectxfn)
2205 2210
2206 2211 # memoizing increases performance for e.g. vcs convert scenarios.
2207 2212 self._filectxfn = makecachingfilectxfn(filectxfn)
2208 2213
2209 2214 if editor:
2210 2215 self._text = editor(self._repo, self, [])
2211 2216 self._repo.savecommitmessage(self._text)
2212 2217
2213 2218 def filectx(self, path, filelog=None):
2214 2219 """get a file context from the working directory
2215 2220
2216 2221 Returns None if file doesn't exist and should be removed."""
2217 2222 return self._filectxfn(self._repo, self, path)
2218 2223
2219 2224 def commit(self):
2220 2225 """commit context to the repo"""
2221 2226 return self._repo.commitctx(self)
2222 2227
2223 2228 @propertycache
2224 2229 def _manifest(self):
2225 2230 """generate a manifest based on the return values of filectxfn"""
2226 2231
2227 2232 # keep this simple for now; just worry about p1
2228 2233 pctx = self._parents[0]
2229 2234 man = pctx.manifest().copy()
2230 2235
2231 2236 for f in self._status.modified:
2232 2237 man[f] = modifiednodeid
2233 2238
2234 2239 for f in self._status.added:
2235 2240 man[f] = addednodeid
2236 2241
2237 2242 for f in self._status.removed:
2238 2243 if f in man:
2239 2244 del man[f]
2240 2245
2241 2246 return man
2242 2247
2243 2248 @propertycache
2244 2249 def _status(self):
2245 2250 """Calculate exact status from ``files`` specified at construction
2246 2251 """
2247 2252 man1 = self.p1().manifest()
2248 2253 p2 = self._parents[1]
2249 2254 # "1 < len(self._parents)" can't be used for checking
2250 2255 # existence of the 2nd parent, because "memctx._parents" is
2251 2256 # explicitly initialized by a list whose length is 2.
2252 2257 if p2.node() != nullid:
2253 2258 man2 = p2.manifest()
2254 2259 managing = lambda f: f in man1 or f in man2
2255 2260 else:
2256 2261 managing = lambda f: f in man1
2257 2262
2258 2263 modified, added, removed = [], [], []
2259 2264 for f in self._files:
2260 2265 if not managing(f):
2261 2266 added.append(f)
2262 2267 elif self[f]:
2263 2268 modified.append(f)
2264 2269 else:
2265 2270 removed.append(f)
2266 2271
2267 2272 return scmutil.status(modified, added, removed, [], [], [], [])
2268 2273
2269 2274 class memfilectx(committablefilectx):
2270 2275 """memfilectx represents an in-memory file to commit.
2271 2276
2272 2277 See memctx and committablefilectx for more details.
2273 2278 """
2274 2279 def __init__(self, repo, changectx, path, data, islink=False,
2275 2280 isexec=False, copied=None):
2276 2281 """
2277 2282 path is the normalized file path relative to repository root.
2278 2283 data is the file content as a string.
2279 2284 islink is True if the file is a symbolic link.
2280 2285 isexec is True if the file is executable.
2281 2286 copied is the source file path if current file was copied in the
2282 2287 revision being committed, or None."""
2283 2288 super(memfilectx, self).__init__(repo, path, None, changectx)
2284 2289 self._data = data
2285 2290 if islink:
2286 2291 self._flags = 'l'
2287 2292 elif isexec:
2288 2293 self._flags = 'x'
2289 2294 else:
2290 2295 self._flags = ''
2291 2296 self._copied = None
2292 2297 if copied:
2293 2298 self._copied = (copied, nullid)
2294 2299
2295 2300 def data(self):
2296 2301 return self._data
2297 2302
2298 2303 def remove(self, ignoremissing=False):
2299 2304 """wraps unlink for a repo's working directory"""
2300 2305 # need to figure out what to do here
2301 2306 del self._changectx[self._path]
2302 2307
2303 2308 def write(self, data, flags, **kwargs):
2304 2309 """wraps repo.wwrite"""
2305 2310 self._data = data
2306 2311
2307 2312
2308 2313 class metadataonlyctx(committablectx):
2309 2314 """Like memctx but it's reusing the manifest of different commit.
2310 2315 Intended to be used by lightweight operations that are creating
2311 2316 metadata-only changes.
2312 2317
2313 2318 Revision information is supplied at initialization time. 'repo' is the
2314 2319 current localrepo, 'originalctx' is the original revision whose manifest
2315 2320 we're reusing, 'parents' is a sequence of two parent revision identifiers
2316 2321 (pass None for every missing parent), 'text' is the commit message.
2317 2322
2318 2323 user receives the committer name and defaults to current repository
2319 2324 username, date is the commit date in any format supported by
2320 2325 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2321 2326 metadata or is left empty.
2322 2327 """
2323 2328 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2324 2329 date=None, extra=None, editor=False):
2325 2330 if text is None:
2326 2331 text = originalctx.description()
2327 2332 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2328 2333 self._rev = None
2329 2334 self._node = None
2330 2335 self._originalctx = originalctx
2331 2336 self._manifestnode = originalctx.manifestnode()
2332 2337 if parents is None:
2333 2338 parents = originalctx.parents()
2334 2339 else:
2335 2340 parents = [repo[p] for p in parents if p is not None]
2336 2341 parents = parents[:]
2337 2342 while len(parents) < 2:
2338 2343 parents.append(repo[nullid])
2339 2344 p1, p2 = self._parents = parents
2340 2345
2341 2346 # sanity check to ensure that the reused manifest parents are
2342 2347 # manifests of our commit parents
2343 2348 mp1, mp2 = self.manifestctx().parents
2344 2349 if p1 != nullid and p1.manifestnode() != mp1:
2345 2350 raise RuntimeError(r"can't reuse the manifest: its p1 "
2346 2351 r"doesn't match the new ctx p1")
2347 2352 if p2 != nullid and p2.manifestnode() != mp2:
2348 2353 raise RuntimeError(r"can't reuse the manifest: "
2349 2354 r"its p2 doesn't match the new ctx p2")
2350 2355
2351 2356 self._files = originalctx.files()
2352 2357 self.substate = {}
2353 2358
2354 2359 if editor:
2355 2360 self._text = editor(self._repo, self, [])
2356 2361 self._repo.savecommitmessage(self._text)
2357 2362
2358 2363 def manifestnode(self):
2359 2364 return self._manifestnode
2360 2365
2361 2366 @property
2362 2367 def _manifestctx(self):
2363 2368 return self._repo.manifestlog[self._manifestnode]
2364 2369
2365 2370 def filectx(self, path, filelog=None):
2366 2371 return self._originalctx.filectx(path, filelog=filelog)
2367 2372
2368 2373 def commit(self):
2369 2374 """commit context to the repo"""
2370 2375 return self._repo.commitctx(self)
2371 2376
2372 2377 @property
2373 2378 def _manifest(self):
2374 2379 return self._originalctx.manifest()
2375 2380
2376 2381 @propertycache
2377 2382 def _status(self):
2378 2383 """Calculate exact status from ``files`` specified in the ``origctx``
2379 2384 and parents manifests.
2380 2385 """
2381 2386 man1 = self.p1().manifest()
2382 2387 p2 = self._parents[1]
2383 2388 # "1 < len(self._parents)" can't be used for checking
2384 2389 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2385 2390 # explicitly initialized by a list whose length is 2.
2386 2391 if p2.node() != nullid:
2387 2392 man2 = p2.manifest()
2388 2393 managing = lambda f: f in man1 or f in man2
2389 2394 else:
2390 2395 managing = lambda f: f in man1
2391 2396
2392 2397 modified, added, removed = [], [], []
2393 2398 for f in self._files:
2394 2399 if not managing(f):
2395 2400 added.append(f)
2396 2401 elif f in self:
2397 2402 modified.append(f)
2398 2403 else:
2399 2404 removed.append(f)
2400 2405
2401 2406 return scmutil.status(modified, added, removed, [], [], [], [])
2402 2407
2403 2408 class arbitraryfilectx(object):
2404 2409 """Allows you to use filectx-like functions on a file in an arbitrary
2405 2410 location on disk, possibly not in the working directory.
2406 2411 """
2407 2412 def __init__(self, path, repo=None):
2408 2413 # Repo is optional because contrib/simplemerge uses this class.
2409 2414 self._repo = repo
2410 2415 self._path = path
2411 2416
2412 2417 def cmp(self, fctx):
2413 2418 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2414 2419 # path if either side is a symlink.
2415 2420 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2416 2421 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2417 2422 # Add a fast-path for merge if both sides are disk-backed.
2418 2423 # Note that filecmp uses the opposite return values (True if same)
2419 2424 # from our cmp functions (True if different).
2420 2425 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2421 2426 return self.data() != fctx.data()
2422 2427
2423 2428 def path(self):
2424 2429 return self._path
2425 2430
2426 2431 def flags(self):
2427 2432 return ''
2428 2433
2429 2434 def data(self):
2430 2435 return util.readfile(self._path)
2431 2436
2432 2437 def decodeddata(self):
2433 2438 with open(self._path, "rb") as f:
2434 2439 return f.read()
2435 2440
2436 2441 def remove(self):
2437 2442 util.unlink(self._path)
2438 2443
2439 2444 def write(self, data, flags, **kwargs):
2440 2445 assert not flags
2441 2446 with open(self._path, "wb") as f:
2442 2447 f.write(data)