##// END OF EJS Templates
context: remove seemingly impossible code branch...
Martin von Zweigbergk -
r40707:1423ff45 default
parent child Browse files
Show More
@@ -1,2437 +1,2435 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirid,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex of the changeset node, e.g. b'1423ff45'
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are the same concrete type and refer
        # to the same revision number. Contexts without a _rev (e.g. not yet
        # committed) never compare equal via this path.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # True if the path `key` is tracked in this revision's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for `path` at this revision.
        return self.filectx(key)

    def __iter__(self):
        # Iterate over the tracked file paths of this revision.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # reported as deleted by the dirstate; don't double-count
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed subrepository state for this context (lazily computed)
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for the subrepo at `subpath`
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # human-readable phase name, e.g. 'public', 'draft', 'secret'
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything beyond the public phase may still be rewritten
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null context when this is not a merge
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for `path`, raising ManifestLookupError
        if the path is not present in this revision.

        Uses the full manifest if already loaded; otherwise tries the cheaper
        manifest delta before falling back to a targeted manifest lookup.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have no flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for this context from patterns and include/exclude
        lists, rooted at the repository root."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE(review): the local name 'reversed' shadows the builtin of the
        # same name within this method.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # to ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
378 378
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        # hash on the revision number when available; fall back to identity
        # for partially-constructed instances.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # delta of the manifest against its storage parent; cheaper than a
        # full manifest read for single-file lookups
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # list of one or two parent contexts; the null second parent is
        # omitted
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        """Return the raw changeset fields as a tuple:
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # generator over ancestor contexts (excluding self)
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
550 550
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Resolve the changeset revision this file revision belongs to,
        # preferring information attached at construction time over the
        # (possibly aliased) linkrev stored in the filelog.
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal when same concrete type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only fall through to a full content comparison when the sizes could
        # plausibly match; otherwise the files must differ.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        attrs = vars(self)
        hastoprev = (r'_changeid' in attrs or r'_changectx' in attrs)
        if hastoprev:
            return self._adjustlinkrev(self.rev(), inclusive=True)
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return the parent filectxs of this file revision, substituting
        rename source information for a null parent when present."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # Walk file-revision ancestors, yielding the one with the highest
        # (linkrev, filenode) key first at each step.
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
902 900
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be supplied.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Pre-populate the lazily-computed attributes when the caller
        # already has them, to avoid recomputation later.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        # Reuse our already-opened filelog for the new context.
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Revision data without revlog flag processing applied.
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        With censor.policy=ignore a censored revision reads as empty;
        otherwise accessing it aborts with a hint.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        # The rename was introduced by this very changeset.
        if self.rev() == self.linkrev():
            return renamed

        # If either changeset parent already has this exact file revision,
        # the rename belongs to an ancestor, not to this changeset.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1008 1006
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # text: commit message; user/date/changes: optional overrides for
        # the lazily-computed defaults provided by the propertycaches below.
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        # Fall back to the dirstate branch; an empty branch name is
        # normalized to 'default'.
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Default when no explicit 'changes' was passed to __init__.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date.
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # All files touched by this (uncommitted) context.
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # Union of the parents' bookmarks.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # An uncommitted context is at least as "unstable" as its most
        # advanced parent phase, but never more public than draft.
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Prefer the cached manifest when it has already been computed.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # 'r' entries are scheduled for removal and therefore excluded.
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # Yield direct parents first, then all changelog ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1211 1209
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # Iterate tracked files, skipping those scheduled for removal ('r').
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # Tracked means neither unknown ('?') nor removed ('r').
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition; return rejected paths."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # Warn (but do not reject) on very large files.
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # Re-adding a removed file simply resurrects it.
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files; return paths that were not tracked."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # Freshly-added files are simply dropped again.
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """Restore removed files from a parent revision's contents."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # Prefer p1's copy of the file, falling back to p2.
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """Record in the dirstate that dest is a copy of source."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        # No filtering needed when the filesystem supports symlinks.
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # Sentinel nodeids mark added/modified entries as different from
        # any stored file revision.
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        # Let the sparse extension react to the new commit as well.
        sparse.aftercommit(self._repo, node)
1563 1561
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # Not yet committed: no changeset or file revision identifiers.
        self._changeid = None
        self._filerev = self._filenode = None

        # Pre-populate lazily-computed attributes when available.
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid when the path is absent from the manifest
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (source path, source filenode, no filelog yet)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Entries with nullid do not exist in that parent and are skipped.
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1610 1608
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # Read straight from the working directory (filters applied).
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        # (source path, source filenode in p1's manifest)
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # Prefer the file's mtime; fall back to the context date if the
        # file has vanished from the working directory.
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # Only meaningful for tracked states (normal/merged/added).
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1698 1696
1699 1697 class overlayworkingctx(committablectx):
1700 1698 """Wraps another mutable context with a write-back cache that can be
1701 1699 converted into a commit context.
1702 1700
1703 1701 self._cache[path] maps to a dict with keys: {
1704 1702 'exists': bool?
1705 1703 'date': date?
1706 1704 'data': str?
1707 1705 'flags': str?
1708 1706 'copied': str? (path or None)
1709 1707 }
1710 1708 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1711 1709 is `False`, the file was deleted.
1712 1710 """
1713 1711
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # Initialize an empty write-back cache (presumably via clean(),
        # defined elsewhere in this class — see self._cache usage below).
        self.clean()
1717 1715
    def setbase(self, wrappedctx):
        """Set ``wrappedctx`` as the context this overlay wraps (its p1)."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')
1725 1723
1726 1724 def data(self, path):
1727 1725 if self.isdirty(path):
1728 1726 if self._cache[path]['exists']:
1729 1727 if self._cache[path]['data']:
1730 1728 return self._cache[path]['data']
1731 1729 else:
1732 1730 # Must fallback here, too, because we only set flags.
1733 1731 return self._wrappedctx[path].data()
1734 1732 else:
1735 1733 raise error.ProgrammingError("No such file or directory: %s" %
1736 1734 path)
1737 1735 else:
1738 1736 return self._wrappedctx[path].data()
1739 1737
    @propertycache
    def _manifest(self):
        """Build p1's manifest with this overlay's edits applied on top."""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        # Sentinel nodeids distinguish in-memory entries from stored ones.
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man
1755 1753
    @propertycache
    def _flagfunc(self):
        # Flags always come from the cache; only dirty paths are expected.
        def f(path):
            return self._cache[path]['flags']
        return f
1761 1759
1762 1760 def files(self):
1763 1761 return sorted(self.added() + self.modified() + self.removed())
1764 1762
1765 1763 def modified(self):
1766 1764 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1767 1765 self._existsinparent(f)]
1768 1766
1769 1767 def added(self):
1770 1768 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1771 1769 not self._existsinparent(f)]
1772 1770
1773 1771 def removed(self):
1774 1772 return [f for f in self._cache.keys() if
1775 1773 not self._cache[f]['exists'] and self._existsinparent(f)]
1776 1774
    def isinmemory(self):
        # Unlike workingctx, this context never touches the filesystem.
        return True
1779 1777
1780 1778 def filedate(self, path):
1781 1779 if self.isdirty(path):
1782 1780 return self._cache[path]['date']
1783 1781 else:
1784 1782 return self._wrappedctx[path].date()
1785 1783
1786 1784 def markcopied(self, path, origin):
1787 1785 if self.isdirty(path):
1788 1786 self._cache[path]['copied'] = origin
1789 1787 else:
1790 1788 raise error.ProgrammingError('markcopied() called on clean context')
1791 1789
1792 1790 def copydata(self, path):
1793 1791 if self.isdirty(path):
1794 1792 return self._cache[path]['copied']
1795 1793 else:
1796 1794 raise error.ProgrammingError('copydata() called on clean context')
1797 1795
1798 1796 def flags(self, path):
1799 1797 if self.isdirty(path):
1800 1798 if self._cache[path]['exists']:
1801 1799 return self._cache[path]['flags']
1802 1800 else:
1803 1801 raise error.ProgrammingError("No such file or directory: %s" %
1804 1802 self._path)
1805 1803 else:
1806 1804 return self._wrappedctx[path].flags()
1807 1805
1808 1806 def _existsinparent(self, path):
1809 1807 try:
1810 1808 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1811 1809 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1812 1810 # with an ``exists()`` function.
1813 1811 self._wrappedctx[path]
1814 1812 return True
1815 1813 except error.ManifestLookupError:
1816 1814 return False
1817 1815
    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            # NOTE(review): if ``component`` is a file in p1 but was never
            # marked dirty in this overlay, ``self._cache[component]`` raises
            # KeyError -- confirm callers always dirty a conflicting component
            # before writing under it.
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            # A single entry that IS the path itself is a plain file
            # overwrite, not a file/directory conflict.
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))
1862 1860
1863 1861 def write(self, path, data, flags='', **kwargs):
1864 1862 if data is None:
1865 1863 raise error.ProgrammingError("data must be non-None")
1866 1864 self._auditconflicts(path)
1867 1865 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1868 1866 flags=flags)
1869 1867
1870 1868 def setflags(self, path, l, x):
1871 1869 flag = ''
1872 1870 if l:
1873 1871 flag = 'l'
1874 1872 elif x:
1875 1873 flag = 'x'
1876 1874 self._markdirty(path, exists=True, date=dateutil.makedate(),
1877 1875 flags=flag)
1878 1876
    def remove(self, path):
        # Removal is just a cache entry with ``exists`` set to False.
        self._markdirty(path, exists=False)
1881 1879
1882 1880 def exists(self, path):
1883 1881 """exists behaves like `lexists`, but needs to follow symlinks and
1884 1882 return False if they are broken.
1885 1883 """
1886 1884 if self.isdirty(path):
1887 1885 # If this path exists and is a symlink, "follow" it by calling
1888 1886 # exists on the destination path.
1889 1887 if (self._cache[path]['exists'] and
1890 1888 'l' in self._cache[path]['flags']):
1891 1889 return self.exists(self._cache[path]['data'].strip())
1892 1890 else:
1893 1891 return self._cache[path]['exists']
1894 1892
1895 1893 return self._existsinparent(path)
1896 1894
1897 1895 def lexists(self, path):
1898 1896 """lexists returns True if the path exists"""
1899 1897 if self.isdirty(path):
1900 1898 return self._cache[path]['exists']
1901 1899
1902 1900 return self._existsinparent(path)
1903 1901
1904 1902 def size(self, path):
1905 1903 if self.isdirty(path):
1906 1904 if self._cache[path]['exists']:
1907 1905 return len(self._cache[path]['data'])
1908 1906 else:
1909 1907 raise error.ProgrammingError("No such file or directory: %s" %
1910 1908 self._path)
1911 1909 return self._wrappedctx[path].size()
1912 1910
    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        # Every cached path is reported, including deletions: memctx needs
        # the path listed in ``files`` with a None filectx to record removal.
        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
1947 1945
    def isdirty(self, path):
        # A path is dirty iff it has an entry in the overlay cache.
        return path in self._cache
1950 1948
1951 1949 def isempty(self):
1952 1950 # We need to discard any keys that are actually clean before the empty
1953 1951 # commit check.
1954 1952 self._compact()
1955 1953 return len(self._cache) == 0
1956 1954
    def clean(self):
        # Forget every cached write, reverting to the wrapped context.
        self._cache = {}
1959 1957
1960 1958 def _compact(self):
1961 1959 """Removes keys from the cache that are actually clean, by comparing
1962 1960 them with the underlying context.
1963 1961
1964 1962 This can occur during the merge process, e.g. by passing --tool :local
1965 1963 to resolve a conflict.
1966 1964 """
1967 1965 keys = []
1968 1966 for path in self._cache.keys():
1969 1967 cache = self._cache[path]
1970 1968 try:
1971 1969 underlying = self._wrappedctx[path]
1972 1970 if (underlying.data() == cache['data'] and
1973 1971 underlying.flags() == cache['flags']):
1974 1972 keys.append(path)
1975 1973 except error.ManifestLookupError:
1976 1974 # Path not in the underlying manifest (created).
1977 1975 continue
1978 1976
1979 1977 for path in keys:
1980 1978 del self._cache[path]
1981 1979 return keys
1982 1980
1983 1981 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1984 1982 # data not provided, let's see if we already have some; if not, let's
1985 1983 # grab it from our underlying context, so that we always have data if
1986 1984 # the file is marked as existing.
1987 1985 if exists and data is None:
1988 1986 oldentry = self._cache.get(path) or {}
1989 1987 data = oldentry.get('data') or self._wrappedctx[path].data()
1990 1988
1991 1989 self._cache[path] = {
1992 1990 'exists': exists,
1993 1991 'data': data,
1994 1992 'date': date,
1995 1993 'flags': flags,
1996 1994 'copied': None,
1997 1995 }
1998 1996
    def filectx(self, path, filelog=None):
        # Hand out an overlay-aware filectx backed by this context.
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2002 2000
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when the contents differ.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        # All state is routed through the owning overlayworkingctx.
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # The overlay context's exists() follows symlinks for us.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        copysource = self._parent.copydata(self._path)
        if not copysource:
            return None
        parentmf = self._changectx._parents[0]._manifest
        return copysource, parentmf.get(copysource, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit: the overlay never touches the filesystem.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass
2061 2059
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        # Anything not being committed counts as clean here.
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        s = self._status
        return scmutil.status([f for f in s.modified if match(f)],
                              [f for f in s.added if match(f)],
                              [f for f in s.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        s = self._status
        changed = set(s.modified)
        changed.update(s.added)
        changed.update(s.removed)
        return changed
2097 2095
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            value = func(repo, memctx, path)
            cache[path] = value
            return value

    return getfilectx
2113 2111
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copysource = fctx.renamed()
        if copysource:
            copysource = copysource[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource)

    return getfilectx
2132 2130
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # Deleted by the patch: None tells memctx to drop the file.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2147 2145
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Missing (None) parents are normalized to the null node.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # Accept a patchstore or any non-callable context-like store by
        # adapting it to the filectxfn callable interface.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # Touched files get placeholder nodeids; real nodeids are only
        # known once the commit is actually written.
        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            # ``self[f]`` invokes filectxfn: a truthy filectx means the file
            # has content in this commit; None/falsy means it was removed.
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2263 2261
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Symlink wins over exec when both are set, matching manifest flags.
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2302 2300
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list isn't mutated.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1 != nullid`` compares a changectx against a bytes
        # node; basectx equality only matches other contexts, so this guard
        # is always True and the manifest check always runs. Presumably
        # ``p1.node() != nullid`` was intended -- confirm before changing,
        # since the written form is stricter for null parents.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        # NOTE(review): same always-True comparison as above for p2.
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            # ``f in self`` checks the reused manifest: present means the
            # file still exists (modified), absent means it was removed.
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2397 2395
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if our contents differ from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no exec/symlink flags.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now