##// END OF EJS Templates
context: fast path linkrev adjustment in trivial case...
Boris Feld -
r40080:ccf4d808 default
parent child Browse files
Show More
@@ -1,2439 +1,2441 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirid,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
# Shorthand for util.propertycache: a descriptor that computes the value on
# first access and caches it on the instance (used throughout this module).
propertycache = util.propertycache

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex hash of the changeset this context points at
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal when they are the same concrete class and
        # reference the same revision; anything without a _rev is unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership: is `key` a file tracked in this revision's manifest?
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for `path` at this revision
        return self.filectx(key)

    def __iter__(self):
        # iterate over the tracked file names
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # already reported as deleted; don't double-report
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed subrepository state (.hgsubstate) for this revision
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for the subrepo at `subpath`
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # human-readable phase name for this changeset
        return phases.phasenames[self.phase()]
    def mutable(self):
        # any non-public changeset may still be rewritten
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        # build a matcher from a fileset expression evaluated in this context
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null context when this is not a merge
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """return (filenode, flags) for `path` in this revision

        Raises ManifestLookupError if the file is not in the manifest.
        Tries the cheapest available source first: a fully loaded manifest,
        then the manifest delta, then a direct manifestlog lookup.
        """
        if r'_manifest' in self.__dict__:
            # the full manifest is already loaded: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            # reading the delta against p1 is cheaper when the file was
            # touched by this changeset
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # flags for `path` in this revision; empty string if file is absent
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # build a file matcher rooted at this repository for this context
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default: diff against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    # prefix subrepo results with the subrepo path
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        narrowmatch = self._repo.narrowmatch()
        if not narrowmatch.always():
            # narrow clone: drop results that fall outside the narrowspec
            for l in r:
                l[:] = list(filter(narrowmatch, l))
        for l in r:
            l.sort()

        return r

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        # hash on the revision number; fall back to identity before _rev
        # has been assigned
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # raw changelog entry for this revision
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # manifest delta against the first parent
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # list of parent contexts; a single entry unless this is a merge
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # True when this revision is filtered out of the 'visible' view
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # yield a context for every ancestor revision of this changeset
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        # revlog storing the history of this file
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Resolve the changeset revision this file revision belongs to,
        # using whichever piece of context the constructor provided.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # truthy when the file actually exists in this context
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal when same concrete class, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # changelog revision recorded in the filelog for this file revision;
        # may be an alias — see introrev()/_adjustlinkrev()
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses with a custom comparison set this to True so that cmp()
    # delegates to them
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the search starts at the stored linkrev itself,
            # which is trivially its own introducing revision
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no associated changeset, or the linkrev already matches: the
            # stored linkrev is authoritative
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is null id and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As null ids have always been filtered out in the previous list
            # comprehension, inserting at 0 will always result in "replacing
            # the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        # second parent, or a null filectx when this is not a merge
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # Walk file ancestors in decreasing (linkrev, filenode) order.
        # When followfirst is true, only the first parent of each revision
        # is followed.
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one of changeid, fileid or changectx is required to pin
        # down which revision of the file this context refers to.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Only seed the attributes we were handed; anything missing is
        # derived lazily through the propertycaches.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Changeset context for this file revision, resolved from
        # self._changeid (which typically comes from a linkrev).
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Undecoded revlog revision (raw=True skips flag processing).
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content for this revision.

        Raises Abort on a censored node unless censor.policy is "ignore",
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size as recorded by the filelog for this file revision.
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # If either parent already carries this exact file revision, the
        # rename does not belong to this changeset: report no copy.
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1011 1013
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # An uncommitted context has no revision number or node yet.
        self._rev = None
        self._node = None
        self._text = text
        # Explicit arguments pre-populate attributes that otherwise fall
        # back to the propertycaches below (_date, _user, _status).
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "1234abcd+": first parent's short hash plus a dirty marker.
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Default when no explicit `changes` were passed to __init__.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic commit date.
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # An uncommitted context has no recorded subrepo revision.
        return None

    def manifestnode(self):
        # No manifest has been written for an uncommitted context.
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # All touched (modified/added/removed) files, sorted.
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # Tags can only attach to committed changesets.
        return []

    def bookmarks(self):
        # Union of the bookmarks of all parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # The pending commit can be no more public than its parents.
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Fast path: if the manifest is already built, read flags from it.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # Tracked files matching `match`, excluding removed ('r') entries.
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # Yield the parents themselves first, then all their ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        # Subclasses (e.g. workingctx) override with a real check.
        return False
1213 1215
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # Iterate tracked files, skipping those marked removed ('r').
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # Tracked and not removed; '?' means unknown to the dirstate.
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        # The working directory is identified by the sentinel wdir id.
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # Single-parent working directory: drop the null second parent.
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Start tracking the given files; return the list of rejected ones."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # Warn (but do not reject) when adding a very large file.
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # Re-adding a removed file just makes it tracked again.
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files; return the list of rejected ones."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # Committed file: schedule removal from tracking.
                    self._repo.dirstate.remove(f)
                else:
                    # Freshly added file: simply drop the pending addition.
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """Restore removed files from a parent revision's contents."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """Record in the dirstate that dest was copied from source."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        # Nothing to do when the filesystem supports symlinks natively.
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # Added/modified files get sentinel nodeids so manifest comparison
        # sees them as different from any committed revision.
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        # Let sparse checkouts refresh their rules after the commit.
        sparse.aftercommit(self._repo, node)
1565 1567
class committablefilectx(basefilectx):
    """Shared behavior for file contexts that can be committed.

    Concrete subclasses (e.g. workingfilectx or memfilectx) provide the
    actual file contents; this base class wires up the context plumbing.
    """
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # Not committed yet, so there is no changelog/filelog position.
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def lookupnode(changectx, fname):
            return changectx._manifest.get(fname, nullid)

        fname = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # The copy source has no cached filelog to reuse.
            entries = [renamed + (None,)]
        else:
            entries = [(fname, lookupnode(parentctxs[0], fname), flog)]

        entries.extend((fname, lookupnode(pc, fname), flog)
                       for pc in parentctxs[1:])

        # Skip entries whose node is null (file absent in that parent).
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        return []
1612 1614
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # Read (and decode) the file from the working directory.
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        # On-disk size of the working copy of the file.
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tzoffset); fall back to ctx date if file is gone."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # Like exists(), but does not follow symlinks.
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # Check the path for badness (e.g. escaping the repo root).
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # Only meaningful for files the dirstate already tracks.
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # Remove the nearest ancestor that is a file/symlink blocking
            # creation of this path.
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # Set the symlink (l) and executable (x) bits on the on-disk file.
        self._repo.wvfs.setflags(self._path, l, x)
1700 1702
1701 1703 class overlayworkingctx(committablectx):
1702 1704 """Wraps another mutable context with a write-back cache that can be
1703 1705 converted into a commit context.
1704 1706
1705 1707 self._cache[path] maps to a dict with keys: {
1706 1708 'exists': bool?
1707 1709 'date': date?
1708 1710 'data': str?
1709 1711 'flags': str?
1710 1712 'copied': str? (path or None)
1711 1713 }
1712 1714 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1713 1715 is `False`, the file was deleted.
1714 1716 """
1715 1717
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # Start with an empty write-back cache; setbase() must be called
        # before the context is usable.
        self.clean()
1719 1721
    def setbase(self, wrappedctx):
        """Set the context this overlay is layered on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')
1727 1729
1728 1730 def data(self, path):
1729 1731 if self.isdirty(path):
1730 1732 if self._cache[path]['exists']:
1731 1733 if self._cache[path]['data']:
1732 1734 return self._cache[path]['data']
1733 1735 else:
1734 1736 # Must fallback here, too, because we only set flags.
1735 1737 return self._wrappedctx[path].data()
1736 1738 else:
1737 1739 raise error.ProgrammingError("No such file or directory: %s" %
1738 1740 path)
1739 1741 else:
1740 1742 return self._wrappedctx[path].data()
1741 1743
    @propertycache
    def _manifest(self):
        # Manifest of the base context's first parent, patched with the
        # overlay's cached edits: added/modified files get sentinel node
        # ids, removed files disappear.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man
1757 1759
1758 1760 @propertycache
1759 1761 def _flagfunc(self):
1760 1762 def f(path):
1761 1763 return self._cache[path]['flags']
1762 1764 return f
1763 1765
1764 1766 def files(self):
1765 1767 return sorted(self.added() + self.modified() + self.removed())
1766 1768
1767 1769 def modified(self):
1768 1770 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1769 1771 self._existsinparent(f)]
1770 1772
1771 1773 def added(self):
1772 1774 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1773 1775 not self._existsinparent(f)]
1774 1776
1775 1777 def removed(self):
1776 1778 return [f for f in self._cache.keys() if
1777 1779 not self._cache[f]['exists'] and self._existsinparent(f)]
1778 1780
1779 1781 def isinmemory(self):
1780 1782 return True
1781 1783
1782 1784 def filedate(self, path):
1783 1785 if self.isdirty(path):
1784 1786 return self._cache[path]['date']
1785 1787 else:
1786 1788 return self._wrappedctx[path].date()
1787 1789
1788 1790 def markcopied(self, path, origin):
1789 1791 if self.isdirty(path):
1790 1792 self._cache[path]['copied'] = origin
1791 1793 else:
1792 1794 raise error.ProgrammingError('markcopied() called on clean context')
1793 1795
1794 1796 def copydata(self, path):
1795 1797 if self.isdirty(path):
1796 1798 return self._cache[path]['copied']
1797 1799 else:
1798 1800 raise error.ProgrammingError('copydata() called on clean context')
1799 1801
1800 1802 def flags(self, path):
1801 1803 if self.isdirty(path):
1802 1804 if self._cache[path]['exists']:
1803 1805 return self._cache[path]['flags']
1804 1806 else:
1805 1807 raise error.ProgrammingError("No such file or directory: %s" %
1806 1808 self._path)
1807 1809 else:
1808 1810 return self._wrappedctx[path].flags()
1809 1811
1810 1812 def _existsinparent(self, path):
1811 1813 try:
1812 1814 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1813 1815 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1814 1816 # with an ``exists()`` function.
1815 1817 self._wrappedctx[path]
1816 1818 return True
1817 1819 except error.ManifestLookupError:
1818 1820 return False
1819 1821
    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.

        Raises ``error.Abort`` when writing ``path`` would collide with an
        existing file or directory in p1.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            # NOTE(review): ``self._cache[component]`` assumes any p1 path
            # that prefixes ``path`` is already dirty; a clean one would
            # raise KeyError here -- confirm callers guarantee this.
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            # Writing over the single entry ``path`` itself is not a
            # directory conflict.
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            # NOTE(review): this also assumes every match is dirty (present
            # in self._cache) -- confirm.
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))
1864 1866
1865 1867 def write(self, path, data, flags='', **kwargs):
1866 1868 if data is None:
1867 1869 raise error.ProgrammingError("data must be non-None")
1868 1870 self._auditconflicts(path)
1869 1871 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1870 1872 flags=flags)
1871 1873
1872 1874 def setflags(self, path, l, x):
1873 1875 flag = ''
1874 1876 if l:
1875 1877 flag = 'l'
1876 1878 elif x:
1877 1879 flag = 'x'
1878 1880 self._markdirty(path, exists=True, date=dateutil.makedate(),
1879 1881 flags=flag)
1880 1882
1881 1883 def remove(self, path):
1882 1884 self._markdirty(path, exists=False)
1883 1885
1884 1886 def exists(self, path):
1885 1887 """exists behaves like `lexists`, but needs to follow symlinks and
1886 1888 return False if they are broken.
1887 1889 """
1888 1890 if self.isdirty(path):
1889 1891 # If this path exists and is a symlink, "follow" it by calling
1890 1892 # exists on the destination path.
1891 1893 if (self._cache[path]['exists'] and
1892 1894 'l' in self._cache[path]['flags']):
1893 1895 return self.exists(self._cache[path]['data'].strip())
1894 1896 else:
1895 1897 return self._cache[path]['exists']
1896 1898
1897 1899 return self._existsinparent(path)
1898 1900
1899 1901 def lexists(self, path):
1900 1902 """lexists returns True if the path exists"""
1901 1903 if self.isdirty(path):
1902 1904 return self._cache[path]['exists']
1903 1905
1904 1906 return self._existsinparent(path)
1905 1907
1906 1908 def size(self, path):
1907 1909 if self.isdirty(path):
1908 1910 if self._cache[path]['exists']:
1909 1911 return len(self._cache[path]['data'])
1910 1912 else:
1911 1913 raise error.ProgrammingError("No such file or directory: %s" %
1912 1914 self._path)
1913 1915 return self._wrappedctx[path].size()
1914 1916
    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        # Every dirty path is reported, including deletions (see getfile).
        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
1949 1951
1950 1952 def isdirty(self, path):
1951 1953 return path in self._cache
1952 1954
1953 1955 def isempty(self):
1954 1956 # We need to discard any keys that are actually clean before the empty
1955 1957 # commit check.
1956 1958 self._compact()
1957 1959 return len(self._cache) == 0
1958 1960
1959 1961 def clean(self):
1960 1962 self._cache = {}
1961 1963
1962 1964 def _compact(self):
1963 1965 """Removes keys from the cache that are actually clean, by comparing
1964 1966 them with the underlying context.
1965 1967
1966 1968 This can occur during the merge process, e.g. by passing --tool :local
1967 1969 to resolve a conflict.
1968 1970 """
1969 1971 keys = []
1970 1972 for path in self._cache.keys():
1971 1973 cache = self._cache[path]
1972 1974 try:
1973 1975 underlying = self._wrappedctx[path]
1974 1976 if (underlying.data() == cache['data'] and
1975 1977 underlying.flags() == cache['flags']):
1976 1978 keys.append(path)
1977 1979 except error.ManifestLookupError:
1978 1980 # Path not in the underlying manifest (created).
1979 1981 continue
1980 1982
1981 1983 for path in keys:
1982 1984 del self._cache[path]
1983 1985 return keys
1984 1986
1985 1987 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1986 1988 # data not provided, let's see if we already have some; if not, let's
1987 1989 # grab it from our underlying context, so that we always have data if
1988 1990 # the file is marked as existing.
1989 1991 if exists and data is None:
1990 1992 oldentry = self._cache.get(path) or {}
1991 1993 data = oldentry.get('data') or self._wrappedctx[path].data()
1992 1994
1993 1995 self._cache[path] = {
1994 1996 'exists': exists,
1995 1997 'data': data,
1996 1998 'date': date,
1997 1999 'flags': flags,
1998 2000 'copied': None,
1999 2001 }
2000 2002
2001 2003 def filectx(self, path, filelog=None):
2002 2004 return overlayworkingfilectx(self._repo, path, parent=self,
2003 2005 filelog=filelog)
2004 2006
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Every operation simply forwards to the owning ``overlayworkingctx``.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source node) if copied, else None."""
        source = self._parent.copydata(self._path)
        if source:
            p1man = self._changectx._parents[0]._manifest
            return source, p1man.get(source, nullid)
        return None

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing on disk to audit for an in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no unknown files can accumulate in memory
        pass
2063 2065
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        cleanfiles = []
        if clean:
            # anything not part of this commit is reported clean
            changed = self._changedset
            cleanfiles = [f for f in self._manifest if f not in changed]
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        return scmutil.status(modified, added, removed,
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2099 2101
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2115 2117
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copysource = fctx.renamed()
        copied = copysource[0] if copysource else copysource
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2134 2136
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch deletes this file; memctx expects None for deletions
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2149 2151
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # substitute nullid for any missing parent
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # in neither parent: an addition
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: content present/modified
                modified.append(f)
            else:
                # filectxfn returned None: a deletion
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2265 2267
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # symlink takes precedence over the exec bit
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2304 2306
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision whose manifest we're reusing,
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # pad with null revisions so there are always exactly two parents
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1`` / ``p2`` are changectx objects, so comparing
        # them to the ``nullid`` bytes is always unequal and the guard never
        # skips -- presumably ``p1.node() != nullid`` was intended; confirm.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # in neither parent manifest: an addition
                added.append(f)
            elif f in self:
                # still present in the reused manifest: modified
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2399 2401
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` (bytes) to the file; ``flags`` must be empty."""
        assert not flags
        # BUG FIX: open in binary mode. ``data`` is bytes (data() and
        # decodeddata() both read binary); writing bytes to a text-mode
        # handle fails on Python 3 and mangles line endings on Windows.
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now