##// END OF EJS Templates
context: replace repeated "self._repo.dirstate" by "ds" variable...
Martin von Zweigbergk -
r41763:fbd4ce55 default
parent child Browse files
Show More
@@ -1,2484 +1,2485 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirid,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short (abbreviated) hex of the context's node.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal only when the concrete types match and both refer to the
        # same revision; anything without a _rev compares unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'key' is a file path; membership is tested against the manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # Indexing a context by file path yields a file context.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context yields the file paths in its manifest.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        's' is an existing scmutil.status whose deleted/unknown/ignored
        fields are taken as-is; modified/added/removed/clean are derived
        from a manifest diff between 'other' (mf1) and self (mf2).
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            # Files already known deleted take precedence over any diff entry.
            if fn in deletedset:
                continue
            if value is None:
                # mf1.diff() reports unchanged-but-matched files as None
                # when clean=True was requested.
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Lazily parsed subrepository state for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Second element of the substate tuple is the subrepo revision.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # Human-readable phase name ('public', 'draft', ...).
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the public phase may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # Single-parent case: synthesize the null context as second parent.
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for 'path', choosing the cheapest source:
        an already-loaded manifest, the manifest delta, or a direct
        manifestlog lookup. Raises ManifestLookupError when absent."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # Missing files are reported as having no flags.
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for this context rooted at the repo root."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # Default comparison target is the first parent.
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE(review): the local name 'reversed' shadows the builtin
        # reversed(); harmless here since the builtin is not used below.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # Merge subrepo results into the top-level status, prefixing
                # each file with its subrepo path.
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        # Sort each status category for deterministic output.
        for l in r:
            l.sort()

        return r
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        # Hash by revision number; fall back to identity when _rev is unset.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Lazily loaded changelog entry for this revision.
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Delta of the manifest against its parent; cheaper than a full read.
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            # Omit the null second parent entirely.
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        """Return the raw changeset fields as a tuple:
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # Branch name is stored in 'extra'; convert to local encoding.
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        # A stored changeset is never in-memory-only.
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # Yield ancestor contexts (exclusive of self).
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # No configured preference matched: use the revlog ancestor.
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        # Lazily opened filelog for this path.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            # _fileid may be a file revision number or node; resolve it.
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # Truthy iff the file exists in this context (filenode resolvable).
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # Hash by (path, filenode); fall back to identity when unresolvable.
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal when concrete type, path and filenode all match.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # _copied is set by subclasses; see filectx/workingfilectx.
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            # Unreadable content is treated as non-binary (best effort).
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # Subclasses with custom comparison semantics set this to True so that
    # cmp() below delegates to them.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # Fast path: the linkrev is the revision we started from.
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # Walked past the floor without finding an introduction.
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        if self.linkrev() >= changelogrev:
            # Cheap check first: the recorded linkrev already qualifies.
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return parent file contexts, substituting rename sources for
        null parents where rename information exists."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # Synthesize a null second parent (fileid=-1) when absent.
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # Walk file-revision ancestors, always yielding the one with the
        # highest (linkrev, filenode) key next; followfirst limits the walk
        # to first parents only.
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one of the three revision selectors must be given;
        # without any of them the file revision is ambiguous.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # Only set the attributes that were provided; the missing ones are
        # computed lazily by the propertycache properties below.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return the revision content as stored (``raw=True``)."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censor.policy == "ignore" maps censored content to empty data;
            # any other policy aborts with a hint.
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """Return the size of this file revision as recorded by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # If either parent already has this exact file revision, the
                # rename belongs to an earlier changeset, not this one.
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """Return filectxs for the filelog children of this file revision."""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1056 1056
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Return the union of the parents' bookmarks."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """Return the highest phase of the parents (at least draft)."""
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags recorded for ``path``, or '' if unknown."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        # Hoist the dirstate into a local, matching the style used by
        # matches() above.
        ds = self._repo.dirstate
        with ds.parentchange():
            for f in self.modified() + self.added():
                ds.normal(f)
            for f in self.removed():
                ds.drop(f)
            ds.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        ds.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1259 1259
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        """Iterate over tracked files (dirstate entries not in state 'r')."""
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        """True if ``key`` is tracked (not unknown '?' or removed 'r')."""
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for tracking; return rejected names."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files; return the names not tracked."""
        with self._repo.wlock():
            # Hoist the dirstate into a local to avoid repeated attribute
            # lookups (same pattern as add() above).
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        """Record that ``dest`` is a copy of ``source`` in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1598 1599
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # A committable file context is never pinned to a stored changeset
        # or file revision, so these stay unset.
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def mfnode(changectx, fname):
            return changectx._manifest.get(fname, nullid)

        fname = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        rename = self.renamed()

        if rename:
            # The copy source replaces the first parent; its filelog entry
            # is filled in lazily (None here).
            candidates = [rename + (None,)]
        else:
            candidates = [(fname, mfnode(parentctxs[0], fname), flog)]

        candidates.extend((fname, mfnode(pctx, fname), flog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        return []
1645 1646
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Return the working-directory content of this file."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source, filenode) if the dirstate records a copy."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx date when
        the file is missing from the working directory."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # Hoist the dirstate into a local instead of looking it up twice.
        ds = self._repo.dirstate
        if ds[self._path] in "nma":
            ds.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1733 1734
1734 1735 class overlayworkingctx(committablectx):
1735 1736 """Wraps another mutable context with a write-back cache that can be
1736 1737 converted into a commit context.
1737 1738
1738 1739 self._cache[path] maps to a dict with keys: {
1739 1740 'exists': bool?
1740 1741 'date': date?
1741 1742 'data': str?
1742 1743 'flags': str?
1743 1744 'copied': str? (path or None)
1744 1745 }
1745 1746 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1746 1747 is `False`, the file was deleted.
1747 1748 """
1748 1749
    def __init__(self, repo):
        """Create an overlay context on top of ``repo``."""
        super(overlayworkingctx, self).__init__(repo)
        # clean() initializes the overlay's cache state (defined later in
        # this class, outside this view).
        self.clean()
1752 1753
    def setbase(self, wrappedctx):
        """Set ``wrappedctx`` as the context this overlay is applied onto."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')
1760 1761
1761 1762 def data(self, path):
1762 1763 if self.isdirty(path):
1763 1764 if self._cache[path]['exists']:
1764 1765 if self._cache[path]['data']:
1765 1766 return self._cache[path]['data']
1766 1767 else:
1767 1768 # Must fallback here, too, because we only set flags.
1768 1769 return self._wrappedctx[path].data()
1769 1770 else:
1770 1771 raise error.ProgrammingError("No such file or directory: %s" %
1771 1772 path)
1772 1773 else:
1773 1774 return self._wrappedctx[path].data()
1774 1775
1775 1776 @propertycache
1776 1777 def _manifest(self):
1777 1778 parents = self.parents()
1778 1779 man = parents[0].manifest().copy()
1779 1780
1780 1781 flag = self._flagfunc
1781 1782 for path in self.added():
1782 1783 man[path] = addednodeid
1783 1784 man.setflag(path, flag(path))
1784 1785 for path in self.modified():
1785 1786 man[path] = modifiednodeid
1786 1787 man.setflag(path, flag(path))
1787 1788 for path in self.removed():
1788 1789 del man[path]
1789 1790 return man
1790 1791
1791 1792 @propertycache
1792 1793 def _flagfunc(self):
1793 1794 def f(path):
1794 1795 return self._cache[path]['flags']
1795 1796 return f
1796 1797
1797 1798 def files(self):
1798 1799 return sorted(self.added() + self.modified() + self.removed())
1799 1800
1800 1801 def modified(self):
1801 1802 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1802 1803 self._existsinparent(f)]
1803 1804
1804 1805 def added(self):
1805 1806 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1806 1807 not self._existsinparent(f)]
1807 1808
1808 1809 def removed(self):
1809 1810 return [f for f in self._cache.keys() if
1810 1811 not self._cache[f]['exists'] and self._existsinparent(f)]
1811 1812
1812 1813 def isinmemory(self):
1813 1814 return True
1814 1815
1815 1816 def filedate(self, path):
1816 1817 if self.isdirty(path):
1817 1818 return self._cache[path]['date']
1818 1819 else:
1819 1820 return self._wrappedctx[path].date()
1820 1821
1821 1822 def markcopied(self, path, origin):
1822 1823 if self.isdirty(path):
1823 1824 self._cache[path]['copied'] = origin
1824 1825 else:
1825 1826 raise error.ProgrammingError('markcopied() called on clean context')
1826 1827
1827 1828 def copydata(self, path):
1828 1829 if self.isdirty(path):
1829 1830 return self._cache[path]['copied']
1830 1831 else:
1831 1832 raise error.ProgrammingError('copydata() called on clean context')
1832 1833
1833 1834 def flags(self, path):
1834 1835 if self.isdirty(path):
1835 1836 if self._cache[path]['exists']:
1836 1837 return self._cache[path]['flags']
1837 1838 else:
1838 1839 raise error.ProgrammingError("No such file or directory: %s" %
1839 1840 self._path)
1840 1841 else:
1841 1842 return self._wrappedctx[path].flags()
1842 1843
1843 1844 def __contains__(self, key):
1844 1845 if key in self._cache:
1845 1846 return self._cache[key]['exists']
1846 1847 return key in self.p1()
1847 1848
1848 1849 def _existsinparent(self, path):
1849 1850 try:
1850 1851 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1851 1852 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1852 1853 # with an ``exists()`` function.
1853 1854 self._wrappedctx[path]
1854 1855 return True
1855 1856 except error.ManifestLookupError:
1856 1857 return False
1857 1858
1858 1859 def _auditconflicts(self, path):
1859 1860 """Replicates conflict checks done by wvfs.write().
1860 1861
1861 1862 Since we never write to the filesystem and never call `applyupdates` in
1862 1863 IMM, we'll never check that a path is actually writable -- e.g., because
1863 1864 it adds `a/foo`, but `a` is actually a file in the other commit.
1864 1865 """
1865 1866 def fail(path, component):
1866 1867 # p1() is the base and we're receiving "writes" for p2()'s
1867 1868 # files.
1868 1869 if 'l' in self.p1()[component].flags():
1869 1870 raise error.Abort("error: %s conflicts with symlink %s "
1870 1871 "in %d." % (path, component,
1871 1872 self.p1().rev()))
1872 1873 else:
1873 1874 raise error.Abort("error: '%s' conflicts with file '%s' in "
1874 1875 "%d." % (path, component,
1875 1876 self.p1().rev()))
1876 1877
1877 1878 # Test that each new directory to be created to write this path from p2
1878 1879 # is not a file in p1.
1879 1880 components = path.split('/')
1880 1881 for i in pycompat.xrange(len(components)):
1881 1882 component = "/".join(components[0:i])
1882 1883 if component in self:
1883 1884 fail(path, component)
1884 1885
1885 1886 # Test the other direction -- that this path from p2 isn't a directory
1886 1887 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1887 1888 match = self.match(pats=[path + '/'], default=b'path')
1888 1889 matches = self.p1().manifest().matches(match)
1889 1890 mfiles = matches.keys()
1890 1891 if len(mfiles) > 0:
1891 1892 if len(mfiles) == 1 and mfiles[0] == path:
1892 1893 return
1893 1894 # omit the files which are deleted in current IMM wctx
1894 1895 mfiles = [m for m in mfiles if m in self]
1895 1896 if not mfiles:
1896 1897 return
1897 1898 raise error.Abort("error: file '%s' cannot be written because "
1898 1899 " '%s/' is a folder in %s (containing %d "
1899 1900 "entries: %s)"
1900 1901 % (path, path, self.p1(), len(mfiles),
1901 1902 ', '.join(mfiles)))
1902 1903
1903 1904 def write(self, path, data, flags='', **kwargs):
1904 1905 if data is None:
1905 1906 raise error.ProgrammingError("data must be non-None")
1906 1907 self._auditconflicts(path)
1907 1908 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1908 1909 flags=flags)
1909 1910
1910 1911 def setflags(self, path, l, x):
1911 1912 flag = ''
1912 1913 if l:
1913 1914 flag = 'l'
1914 1915 elif x:
1915 1916 flag = 'x'
1916 1917 self._markdirty(path, exists=True, date=dateutil.makedate(),
1917 1918 flags=flag)
1918 1919
1919 1920 def remove(self, path):
1920 1921 self._markdirty(path, exists=False)
1921 1922
1922 1923 def exists(self, path):
1923 1924 """exists behaves like `lexists`, but needs to follow symlinks and
1924 1925 return False if they are broken.
1925 1926 """
1926 1927 if self.isdirty(path):
1927 1928 # If this path exists and is a symlink, "follow" it by calling
1928 1929 # exists on the destination path.
1929 1930 if (self._cache[path]['exists'] and
1930 1931 'l' in self._cache[path]['flags']):
1931 1932 return self.exists(self._cache[path]['data'].strip())
1932 1933 else:
1933 1934 return self._cache[path]['exists']
1934 1935
1935 1936 return self._existsinparent(path)
1936 1937
1937 1938 def lexists(self, path):
1938 1939 """lexists returns True if the path exists"""
1939 1940 if self.isdirty(path):
1940 1941 return self._cache[path]['exists']
1941 1942
1942 1943 return self._existsinparent(path)
1943 1944
1944 1945 def size(self, path):
1945 1946 if self.isdirty(path):
1946 1947 if self._cache[path]['exists']:
1947 1948 return len(self._cache[path]['data'])
1948 1949 else:
1949 1950 raise error.ProgrammingError("No such file or directory: %s" %
1950 1951 self._path)
1951 1952 return self._wrappedctx[path].size()
1952 1953
1953 1954 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1954 1955 user=None, editor=None):
1955 1956 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1956 1957 committed.
1957 1958
1958 1959 ``text`` is the commit message.
1959 1960 ``parents`` (optional) are rev numbers.
1960 1961 """
1961 1962 # Default parents to the wrapped contexts' if not passed.
1962 1963 if parents is None:
1963 1964 parents = self._wrappedctx.parents()
1964 1965 if len(parents) == 1:
1965 1966 parents = (parents[0], None)
1966 1967
1967 1968 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1968 1969 if parents[1] is None:
1969 1970 parents = (self._repo[parents[0]], None)
1970 1971 else:
1971 1972 parents = (self._repo[parents[0]], self._repo[parents[1]])
1972 1973
1973 1974 files = self._cache.keys()
1974 1975 def getfile(repo, memctx, path):
1975 1976 if self._cache[path]['exists']:
1976 1977 return memfilectx(repo, memctx, path,
1977 1978 self._cache[path]['data'],
1978 1979 'l' in self._cache[path]['flags'],
1979 1980 'x' in self._cache[path]['flags'],
1980 1981 self._cache[path]['copied'])
1981 1982 else:
1982 1983 # Returning None, but including the path in `files`, is
1983 1984 # necessary for memctx to register a deletion.
1984 1985 return None
1985 1986 return memctx(self._repo, parents, text, files, getfile, date=date,
1986 1987 extra=extra, user=user, branch=branch, editor=editor)
1987 1988
1988 1989 def isdirty(self, path):
1989 1990 return path in self._cache
1990 1991
1991 1992 def isempty(self):
1992 1993 # We need to discard any keys that are actually clean before the empty
1993 1994 # commit check.
1994 1995 self._compact()
1995 1996 return len(self._cache) == 0
1996 1997
1997 1998 def clean(self):
1998 1999 self._cache = {}
1999 2000
2000 2001 def _compact(self):
2001 2002 """Removes keys from the cache that are actually clean, by comparing
2002 2003 them with the underlying context.
2003 2004
2004 2005 This can occur during the merge process, e.g. by passing --tool :local
2005 2006 to resolve a conflict.
2006 2007 """
2007 2008 keys = []
2008 2009 # This won't be perfect, but can help performance significantly when
2009 2010 # using things like remotefilelog.
2010 2011 scmutil.prefetchfiles(
2011 2012 self.repo(), [self.p1().rev()],
2012 2013 scmutil.matchfiles(self.repo(), self._cache.keys()))
2013 2014
2014 2015 for path in self._cache.keys():
2015 2016 cache = self._cache[path]
2016 2017 try:
2017 2018 underlying = self._wrappedctx[path]
2018 2019 if (underlying.data() == cache['data'] and
2019 2020 underlying.flags() == cache['flags']):
2020 2021 keys.append(path)
2021 2022 except error.ManifestLookupError:
2022 2023 # Path not in the underlying manifest (created).
2023 2024 continue
2024 2025
2025 2026 for path in keys:
2026 2027 del self._cache[path]
2027 2028 return keys
2028 2029
2029 2030 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2030 2031 # data not provided, let's see if we already have some; if not, let's
2031 2032 # grab it from our underlying context, so that we always have data if
2032 2033 # the file is marked as existing.
2033 2034 if exists and data is None:
2034 2035 oldentry = self._cache.get(path) or {}
2035 2036 data = oldentry.get('data') or self._wrappedctx[path].data()
2036 2037
2037 2038 self._cache[path] = {
2038 2039 'exists': exists,
2039 2040 'data': data,
2040 2041 'date': date,
2041 2042 'flags': flags,
2042 2043 'copied': None,
2043 2044 }
2044 2045
2045 2046 def filectx(self, path, filelog=None):
2046 2047 return overlayworkingfilectx(self._repo, path, parent=self,
2047 2048 filelog=filelog)
2048 2049
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        """The ``overlayworkingctx`` this file belongs to."""
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # delegates through lexists, which itself delegates to the overlay
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, filenode) if this file was copied, else
        None."""
        source = self._parent.copydata(self._path)
        if not source:
            return None
        basemanifest = self._changectx._parents[0]._manifest
        return source, basemanifest.get(source, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no working directory, so there is nothing conflicting to clear
        pass
2107 2108
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.

        Note that ``ignored`` and ``unknown`` are not honored here: files
        in those categories are never part of the commit being built, so
        the corresponding result lists are always empty.
        """
        if clean:
            # everything tracked that is not being committed counts as clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2143 2144
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: first miss computes and stores; later calls hit the memo.
        # ``None`` results (deleted files) are cached as well.
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2159 2160
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        renamed = fctx.renamed()
        # renamed() yields (source, filenode) or a false value; only the
        # source path is wanted here
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2176 2177
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch deleted this file
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data,
                          islink=islink, isexec=isexec, copied=copied)

    return getfilectx
2191 2192
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing (None) parents to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patchstore or a context in place of a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: tracked and present
                modified.append(f)
            else:
                # filectxfn returned None: the file was deleted
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2307 2308
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Flags use the manifest encoding: 'l' (symlink) takes precedence
        # over 'x' (executable); a plain file carries no flag.
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2348 2349
2349 2350
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # copy before padding so the caller's list is not mutated
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        # NOTE(review): ``p1 != nullid`` compares a context object against a
        # node hash, which presumably is always True; the intent looks like
        # ``p1.node() != nullid`` -- verify before relying on this guard.
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # the reused manifest node from the original context
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2444 2445
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        anysymlink = 'l' in self.flags() or 'l' in fctx.flags()
        if self._repo and not anysymlink and isinstance(fctx, workingfilectx):
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """The on-disk path this context wraps."""
        return self._path

    def flags(self):
        # arbitrary files carry no manifest flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        """Read the raw file content directly from disk."""
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now