##// END OF EJS Templates
context: move contents of committablectx.markcommitted() to workingctx...
Martin von Zweigbergk
r42481:fdd4d668 default
parent child Browse files
Show More
@@ -1,2571 +1,2569 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58
59 59 def __init__(self, repo):
60 60 self._repo = repo
61 61
62 62 def __bytes__(self):
63 63 return short(self.node())
64 64
65 65 __str__ = encoding.strmethod(__bytes__)
66 66
67 67 def __repr__(self):
68 68 return r"<%s %s>" % (type(self).__name__, str(self))
69 69
70 70 def __eq__(self, other):
71 71 try:
72 72 return type(self) == type(other) and self._rev == other._rev
73 73 except AttributeError:
74 74 return False
75 75
76 76 def __ne__(self, other):
77 77 return not (self == other)
78 78
79 79 def __contains__(self, key):
80 80 return key in self._manifest
81 81
82 82 def __getitem__(self, key):
83 83 return self.filectx(key)
84 84
85 85 def __iter__(self):
86 86 return iter(self._manifest)
87 87
88 88 def _buildstatusmanifest(self, status):
89 89 """Builds a manifest that includes the given status results, if this is
90 90 a working copy context. For non-working copy contexts, it just returns
91 91 the normal manifest."""
92 92 return self.manifest()
93 93
94 94 def _matchstatus(self, other, match):
95 95 """This internal method provides a way for child objects to override the
96 96 match operator.
97 97 """
98 98 return match
99 99
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        Diffs the manifests of ``other`` (the "old" side) and ``self``, and
        folds the deleted/unknown/ignored lists already present in ``s``
        into the returned scmutil.status tuple.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # already reported as "deleted"; don't double-report here
                continue
            if value is None:
                # identical on both sides (only emitted when listclean)
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # working-directory nodeid: must compare contents explicitly
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
157 157
158 158 @propertycache
159 159 def substate(self):
160 160 return subrepoutil.state(self, self._repo.ui)
161 161
162 162 def subrev(self, subpath):
163 163 return self.substate[subpath][1]
164 164
165 165 def rev(self):
166 166 return self._rev
167 167 def node(self):
168 168 return self._node
169 169 def hex(self):
170 170 return hex(self.node())
171 171 def manifest(self):
172 172 return self._manifest
173 173 def manifestctx(self):
174 174 return self._manifestctx
175 175 def repo(self):
176 176 return self._repo
177 177 def phasestr(self):
178 178 return phases.phasenames[self.phase()]
179 179 def mutable(self):
180 180 return self.phase() > phases.public
181 181
182 182 def matchfileset(self, expr, badfn=None):
183 183 return fileset.match(self, expr, badfn=badfn)
184 184
185 185 def obsolete(self):
186 186 """True if the changeset is obsolete"""
187 187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 188
189 189 def extinct(self):
190 190 """True if the changeset is extinct"""
191 191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 192
193 193 def orphan(self):
194 194 """True if the changeset is not obsolete, but its ancestor is"""
195 195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 196
197 197 def phasedivergent(self):
198 198 """True if the changeset tries to be a successor of a public changeset
199 199
200 200 Only non-public and non-obsolete changesets may be phase-divergent.
201 201 """
202 202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 203
204 204 def contentdivergent(self):
205 205 """Is a successor of a changeset with multiple possible successor sets
206 206
207 207 Only non-public and non-obsolete changesets may be content-divergent.
208 208 """
209 209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 210
211 211 def isunstable(self):
212 212 """True if the changeset is either orphan, phase-divergent or
213 213 content-divergent"""
214 214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 215
216 216 def instabilities(self):
217 217 """return the list of instabilities affecting this changeset.
218 218
219 219 Instabilities are returned as strings. possible values are:
220 220 - orphan,
221 221 - phase-divergent,
222 222 - content-divergent.
223 223 """
224 224 instabilities = []
225 225 if self.orphan():
226 226 instabilities.append('orphan')
227 227 if self.phasedivergent():
228 228 instabilities.append('phase-divergent')
229 229 if self.contentdivergent():
230 230 instabilities.append('content-divergent')
231 231 return instabilities
232 232
233 233 def parents(self):
234 234 """return contexts for each parent changeset"""
235 235 return self._parents
236 236
237 237 def p1(self):
238 238 return self._parents[0]
239 239
240 240 def p2(self):
241 241 parents = self._parents
242 242 if len(parents) == 2:
243 243 return parents[1]
244 244 return self._repo[nullrev]
245 245
    def _fileinfo(self, path):
        """Return ``(filenode, flags)`` for ``path`` in this changeset.

        Consults progressively cheaper sources: a fully parsed manifest if
        one is already cached, then a cached manifest delta (only useful if
        the file was touched in this changeset), and finally a targeted
        find() on the manifest revlog.

        Raises error.ManifestLookupError if the path is not in the manifest.
        """
        if r'_manifest' in self.__dict__:
            # full manifest already parsed and cached: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            # the delta only carries entries touched in this changeset, so
            # only consult it when it is already cached or the file is known
            # to have been modified here; fall through on a miss
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag
265 265
266 266 def filenode(self, path):
267 267 return self._fileinfo(path)[0]
268 268
269 269 def flags(self, path):
270 270 try:
271 271 return self._fileinfo(path)[1]
272 272 except error.LookupError:
273 273 return ''
274 274
    @propertycache
    def _copies(self):
        """Compute ``(p1copies, p2copies)`` from filelog rename metadata.

        Each copy is attributed to whichever parent actually contains the
        copy source at the recorded file node.
        """
        p1copies = {}
        p2copies = {}
        p1 = self.p1()
        p2 = self.p2()
        narrowmatch = self._repo.narrowmatch()
        for dst in self.files():
            if not narrowmatch(dst) or dst not in self:
                # outside the narrowspec, or not present in this context
                continue
            copied = self[dst].renamed()
            if not copied:
                continue
            src, srcnode = copied
            if src in p1 and p1[src].filenode() == srcnode:
                p1copies[dst] = src
            elif src in p2 and p2[src].filenode() == srcnode:
                p2copies[dst] = src
        return p1copies, p2copies
294 294 def p1copies(self):
295 295 return self._copies[0]
296 296 def p2copies(self):
297 297 return self._copies[1]
298 298
299 299 def sub(self, path, allowcreate=True):
300 300 '''return a subrepo for the stored revision of path, never wdir()'''
301 301 return subrepo.subrepo(self, path, allowcreate=allowcreate)
302 302
303 303 def nullsub(self, path, pctx):
304 304 return subrepo.nullsubrepo(self, path, pctx)
305 305
306 306 def workingsub(self, path):
307 307 '''return a subrepo for the stored revision, or wdir if this is a wdir
308 308 context.
309 309 '''
310 310 return subrepo.subrepo(self, path, allowwdir=True)
311 311
312 312 def match(self, pats=None, include=None, exclude=None, default='glob',
313 313 listsubrepos=False, badfn=None):
314 314 r = self._repo
315 315 return matchmod.match(r.root, r.getcwd(), pats,
316 316 include, exclude, default,
317 317 auditor=r.nofsauditor, ctx=self,
318 318 listsubrepos=listsubrepos, badfn=badfn)
319 319
320 320 def diff(self, ctx2=None, match=None, changes=None, opts=None,
321 321 losedatafn=None, pathfn=None, copy=None,
322 322 copysourcematch=None, hunksfilterfn=None):
323 323 """Returns a diff generator for the given contexts and matcher"""
324 324 if ctx2 is None:
325 325 ctx2 = self.p1()
326 326 if ctx2 is not None:
327 327 ctx2 = self._repo[ctx2]
328 328 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
329 329 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
330 330 copy=copy, copysourcematch=copysourcematch,
331 331 hunksfilterfn=hunksfilterfn)
332 332
333 333 def dirs(self):
334 334 return self._manifest.dirs()
335 335
336 336 def hasdir(self, dir):
337 337 return self._manifest.hasdir(dir)
338 338
339 339 def status(self, other=None, match=None, listignored=False,
340 340 listclean=False, listunknown=False, listsubrepos=False):
341 341 """return status of files between two nodes or node and working
342 342 directory.
343 343
344 344 If other is None, compare this node with working directory.
345 345
346 346 returns (modified, added, removed, deleted, unknown, ignored, clean)
347 347 """
348 348
349 349 ctx1 = self
350 350 ctx2 = self._repo[other]
351 351
352 352 # This next code block is, admittedly, fragile logic that tests for
353 353 # reversing the contexts and wouldn't need to exist if it weren't for
354 354 # the fast (and common) code path of comparing the working directory
355 355 # with its first parent.
356 356 #
357 357 # What we're aiming for here is the ability to call:
358 358 #
359 359 # workingctx.status(parentctx)
360 360 #
361 361 # If we always built the manifest for each context and compared those,
362 362 # then we'd be done. But the special case of the above call means we
363 363 # just copy the manifest of the parent.
364 364 reversed = False
365 365 if (not isinstance(ctx1, changectx)
366 366 and isinstance(ctx2, changectx)):
367 367 reversed = True
368 368 ctx1, ctx2 = ctx2, ctx1
369 369
370 370 match = self._repo.narrowmatch(match)
371 371 match = ctx2._matchstatus(ctx1, match)
372 372 r = scmutil.status([], [], [], [], [], [], [])
373 373 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
374 374 listunknown)
375 375
376 376 if reversed:
377 377 # Reverse added and removed. Clear deleted, unknown and ignored as
378 378 # these make no sense to reverse.
379 379 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
380 380 r.clean)
381 381
382 382 if listsubrepos:
383 383 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
384 384 try:
385 385 rev2 = ctx2.subrev(subpath)
386 386 except KeyError:
387 387 # A subrepo that existed in node1 was deleted between
388 388 # node1 and node2 (inclusive). Thus, ctx2's substate
389 389 # won't contain that subpath. The best we can do ignore it.
390 390 rev2 = None
391 391 submatch = matchmod.subdirmatcher(subpath, match)
392 392 s = sub.status(rev2, match=submatch, ignored=listignored,
393 393 clean=listclean, unknown=listunknown,
394 394 listsubrepos=True)
395 395 for rfiles, sfiles in zip(r, s):
396 396 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
397 397
398 398 for l in r:
399 399 l.sort()
400 400
401 401 return r
402 402
403 403 class changectx(basectx):
404 404 """A changecontext object makes access to data related to a particular
405 405 changeset convenient. It represents a read-only context already present in
406 406 the repo."""
407 407 def __init__(self, repo, rev, node):
408 408 super(changectx, self).__init__(repo)
409 409 self._rev = rev
410 410 self._node = node
411 411
412 412 def __hash__(self):
413 413 try:
414 414 return hash(self._rev)
415 415 except AttributeError:
416 416 return id(self)
417 417
418 418 def __nonzero__(self):
419 419 return self._rev != nullrev
420 420
421 421 __bool__ = __nonzero__
422 422
423 423 @propertycache
424 424 def _changeset(self):
425 425 return self._repo.changelog.changelogrevision(self.rev())
426 426
427 427 @propertycache
428 428 def _manifest(self):
429 429 return self._manifestctx.read()
430 430
431 431 @property
432 432 def _manifestctx(self):
433 433 return self._repo.manifestlog[self._changeset.manifest]
434 434
435 435 @propertycache
436 436 def _manifestdelta(self):
437 437 return self._manifestctx.readdelta()
438 438
439 439 @propertycache
440 440 def _parents(self):
441 441 repo = self._repo
442 442 p1, p2 = repo.changelog.parentrevs(self._rev)
443 443 if p2 == nullrev:
444 444 return [repo[p1]]
445 445 return [repo[p1], repo[p2]]
446 446
447 447 def changeset(self):
448 448 c = self._changeset
449 449 return (
450 450 c.manifest,
451 451 c.user,
452 452 c.date,
453 453 c.files,
454 454 c.description,
455 455 c.extra,
456 456 )
457 457 def manifestnode(self):
458 458 return self._changeset.manifest
459 459
460 460 def user(self):
461 461 return self._changeset.user
462 462 def date(self):
463 463 return self._changeset.date
464 464 def files(self):
465 465 return self._changeset.files
    @propertycache
    def _copies(self):
        # Where copy metadata is read from is governed by the experimental
        # 'copies.read-from' config knob.
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if (source == 'changeset-only' or
            (source == 'compatibility' and p1copies is not None)):
            return p1copies or {}, p2copies or {}

        # Otherwise (config said to read only from filelog, or we are in
        # compatibility mode and there is no data in the changeset), we get
        # the copy metadata from the filelogs.
        return super(changectx, self)._copies
484 484 def description(self):
485 485 return self._changeset.description
486 486 def branch(self):
487 487 return encoding.tolocal(self._changeset.extra.get("branch"))
488 488 def closesbranch(self):
489 489 return 'close' in self._changeset.extra
490 490 def extra(self):
491 491 """Return a dict of extra information."""
492 492 return self._changeset.extra
493 493 def tags(self):
494 494 """Return a list of byte tag names"""
495 495 return self._repo.nodetags(self._node)
496 496 def bookmarks(self):
497 497 """Return a list of byte bookmark names."""
498 498 return self._repo.nodebookmarks(self._node)
499 499 def phase(self):
500 500 return self._repo._phasecache.phase(self._repo, self._rev)
501 501 def hidden(self):
502 502 return self._rev in repoview.filterrevs(self._repo, 'visible')
503 503
504 504 def isinmemory(self):
505 505 return False
506 506
507 507 def children(self):
508 508 """return list of changectx contexts for each child changeset.
509 509
510 510 This returns only the immediate child changesets. Use descendants() to
511 511 recursively walk children.
512 512 """
513 513 c = self._repo.changelog.children(self._node)
514 514 return [self._repo[x] for x in c]
515 515
516 516 def ancestors(self):
517 517 for a in self._repo.changelog.ancestors([self._rev]):
518 518 yield self._repo[a]
519 519
520 520 def descendants(self):
521 521 """Recursively yield all children of the changeset.
522 522
523 523 For just the immediate children, use children()
524 524 """
525 525 for d in self._repo.changelog.descendants([self._rev]):
526 526 yield self._repo[d]
527 527
528 528 def filectx(self, path, fileid=None, filelog=None):
529 529 """get a file context from this changeset"""
530 530 if fileid is None:
531 531 fileid = self.filenode(path)
532 532 return filectx(self._repo, path, fileid=fileid,
533 533 changectx=self, filelog=filelog)
534 534
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor.

        When ``warn`` is true and a fallback choice is made among several
        candidates, a note listing the alternatives is printed.
        """
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # c2 has no node of its own; use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all
            anc = nullid
        elif len(cahs) == 1:
            # unambiguous: the single common-ancestor head
            anc = cahs[0]
        else:
            # multiple "best" candidates: let configuration pick one
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # none of the preferred revisions is a candidate; fall back
                # to the changelog's deterministic choice
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]
570 570
571 571 def isancestorof(self, other):
572 572 """True if this changeset is an ancestor of other"""
573 573 return self._repo.changelog.isancestorrev(self._rev, other._rev)
574 574
575 575 def walk(self, match):
576 576 '''Generates matching file names.'''
577 577
578 578 # Wrap match.bad method to have message with nodeid
579 579 def bad(fn, msg):
580 580 # The manifest doesn't know about subrepos, so don't complain about
581 581 # paths into valid subrepos.
582 582 if any(fn == s or fn.startswith(s + '/')
583 583 for s in self.substate):
584 584 return
585 585 match.bad(fn, _('no such file in rev %s') % self)
586 586
587 587 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
588 588 return self._manifest.walk(m)
589 589
590 590 def matches(self, match):
591 591 return self.walk(match)
592 592
593 593 class basefilectx(object):
594 594 """A filecontext object represents the common logic for its children:
595 595 filectx: read-only access to a filerevision that is already present
596 596 in the repo,
597 597 workingfilectx: a filecontext that represents files from the working
598 598 directory,
599 599 memfilectx: a filecontext that represents files in-memory,
600 600 """
601 601 @propertycache
602 602 def _filelog(self):
603 603 return self._repo.file(self._path)
604 604
605 605 @propertycache
606 606 def _changeid(self):
607 607 if r'_changectx' in self.__dict__:
608 608 return self._changectx.rev()
609 609 elif r'_descendantrev' in self.__dict__:
610 610 # this file context was created from a revision with a known
611 611 # descendant, we can (lazily) correct for linkrev aliases
612 612 return self._adjustlinkrev(self._descendantrev)
613 613 else:
614 614 return self._filelog.linkrev(self._filerev)
615 615
616 616 @propertycache
617 617 def _filenode(self):
618 618 if r'_fileid' in self.__dict__:
619 619 return self._filelog.lookup(self._fileid)
620 620 else:
621 621 return self._changectx.filenode(self._path)
622 622
623 623 @propertycache
624 624 def _filerev(self):
625 625 return self._filelog.rev(self._filenode)
626 626
627 627 @propertycache
628 628 def _repopath(self):
629 629 return self._path
630 630
631 631 def __nonzero__(self):
632 632 try:
633 633 self._filenode
634 634 return True
635 635 except error.LookupError:
636 636 # file is missing
637 637 return False
638 638
639 639 __bool__ = __nonzero__
640 640
641 641 def __bytes__(self):
642 642 try:
643 643 return "%s@%s" % (self.path(), self._changectx)
644 644 except error.LookupError:
645 645 return "%s@???" % self.path()
646 646
647 647 __str__ = encoding.strmethod(__bytes__)
648 648
649 649 def __repr__(self):
650 650 return r"<%s %s>" % (type(self).__name__, str(self))
651 651
652 652 def __hash__(self):
653 653 try:
654 654 return hash((self._path, self._filenode))
655 655 except AttributeError:
656 656 return id(self)
657 657
658 658 def __eq__(self, other):
659 659 try:
660 660 return (type(self) == type(other) and self._path == other._path
661 661 and self._filenode == other._filenode)
662 662 except AttributeError:
663 663 return False
664 664
665 665 def __ne__(self, other):
666 666 return not (self == other)
667 667
668 668 def filerev(self):
669 669 return self._filerev
670 670 def filenode(self):
671 671 return self._filenode
672 672 @propertycache
673 673 def _flags(self):
674 674 return self._changectx.flags(self._path)
675 675 def flags(self):
676 676 return self._flags
677 677 def filelog(self):
678 678 return self._filelog
679 679 def rev(self):
680 680 return self._changeid
681 681 def linkrev(self):
682 682 return self._filelog.linkrev(self._filerev)
683 683 def node(self):
684 684 return self._changectx.node()
685 685 def hex(self):
686 686 return self._changectx.hex()
687 687 def user(self):
688 688 return self._changectx.user()
689 689 def date(self):
690 690 return self._changectx.date()
691 691 def files(self):
692 692 return self._changectx.files()
693 693 def description(self):
694 694 return self._changectx.description()
695 695 def branch(self):
696 696 return self._changectx.branch()
697 697 def extra(self):
698 698 return self._changectx.extra()
699 699 def phase(self):
700 700 return self._changectx.phase()
701 701 def phasestr(self):
702 702 return self._changectx.phasestr()
703 703 def obsolete(self):
704 704 return self._changectx.obsolete()
705 705 def instabilities(self):
706 706 return self._changectx.instabilities()
707 707 def manifest(self):
708 708 return self._changectx.manifest()
709 709 def changectx(self):
710 710 return self._changectx
711 711 def renamed(self):
712 712 return self._copied
713 713 def copysource(self):
714 714 return self._copied and self._copied[0]
715 715 def repo(self):
716 716 return self._repo
717 717 def size(self):
718 718 return len(self.data())
719 719
720 720 def path(self):
721 721 return self._path
722 722
723 723 def isbinary(self):
724 724 try:
725 725 return stringutil.binary(self.data())
726 726 except IOError:
727 727 return False
728 728 def isexec(self):
729 729 return 'x' in self.flags()
730 730 def islink(self):
731 731 return 'l' in self.flags()
732 732
733 733 def isabsent(self):
734 734 """whether this filectx represents a file not in self._changectx
735 735
736 736 This is mainly for merge code to detect change/delete conflicts. This is
737 737 expected to be True for all subclasses of basectx."""
738 738 return False
739 739
    # Subclasses with their own comparison logic (e.g. in-memory file
    # contexts) set this so that cmp() defers to them.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # the other side knows better how to compare itself; defer
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            # fctx is not revlog-backed (e.g. working directory file); we may
            # be able to decide from sizes alone, otherwise compare contents
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
768 768
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # the stored linkrev is exactly the revision we started from
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked below the floor without finding an introduction
                    return None
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result in a crash somewhere else at some point.
        return lkr
822 822
823 823 def isintroducedafter(self, changelogrev):
824 824 """True if a filectx has been introduced after a given floor revision
825 825 """
826 826 if self.linkrev() >= changelogrev:
827 827 return True
828 828 introrev = self._introrev(stoprev=changelogrev)
829 829 if introrev is None:
830 830 return False
831 831 return introrev >= changelogrev
832 832
833 833 def introrev(self):
834 834 """return the rev of the changeset which introduced this file revision
835 835
836 836 This method is different from linkrev because it take into account the
837 837 changeset the filectx was created from. It ensures the returned
838 838 revision is one of its ancestors. This prevents bugs from
839 839 'linkrev-shadowing' when a file revision is used by multiple
840 840 changesets.
841 841 """
842 842 return self._introrev()
843 843
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            # anchor the linkrev adjustment at the known changeset
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            # only a descendant is known; adjust from there
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no ancestry information at all: the raw linkrev is our best bet
            return self.linkrev()
872 872
873 873 def introfilectx(self):
874 874 """Return filectx having identical contents, but pointing to the
875 875 changeset revision where this filectx was introduced"""
876 876 introrev = self.introrev()
877 877 if self.rev() == introrev:
878 878 return self
879 879 return self.filectx(self.filenode(), changeid=introrev)
880 880
881 881 def _parentfilectx(self, path, fileid, filelog):
882 882 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
883 883 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
884 884 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
885 885 # If self is associated with a changeset (probably explicitly
886 886 # fed), ensure the created filectx is associated with a
887 887 # changeset that is an ancestor of self.changectx.
888 888 # This lets us later use _adjustlinkrev to get a correct link.
889 889 fctx._descendantrev = self.rev()
890 890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 891 elif r'_descendantrev' in vars(self):
892 892 # Otherwise propagate _descendantrev if we have one associated.
893 893 fctx._descendantrev = self._descendantrev
894 894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 895 return fctx
896 896
def parents(self):
    """Return parent filectxs of this file revision.

    Rename information from the filelog, when present, replaces the
    (always first) null parent produced by a rename.
    """
    _path = self._path
    fl = self._filelog
    parents = self._filelog.parents(self._filenode)
    pl = [(_path, node, fl) for node in parents if node != nullid]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        # be replaced with the rename information. This parent is -always-
        # the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
916 916
def p1(self):
    """Return the first parent filectx."""
    parents = self.parents()
    return parents[0]
919 919
def p2(self):
    """Return the second parent filectx, or a null filectx of this file
    when there is no second parent."""
    parents = self.parents()
    if len(parents) != 2:
        # synthesize a null revision of this file (fileid=-1)
        return filectx(self._repo, self._path, fileid=-1,
                       filelog=self._filelog)
    return parents[1]
925 925
def annotate(self, follow=False, skiprevs=None, diffopts=None):
    """Returns a list of annotateline objects for each line in the file

    - line.fctx is the filectx of the node where that line was last changed
    - line.lineno is the line number at the first appearance in the managed
      file
    - line.text is the data on that line (including newline character)

    follow: also follow copies/renames when collecting parents.
    skiprevs/diffopts: forwarded to dagop.annotate().
    """
    getlog = util.lrucachefunc(lambda x: self._repo.file(x))

    def parents(f):
        # Cut _descendantrev here to mitigate the penalty of lazy linkrev
        # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
        # from the topmost introrev (= srcrev) down to p.linkrev() if it
        # isn't an ancestor of the srcrev.
        # (attribute access alone forces the propertycache)
        f._changeid
        pl = f.parents()

        # Don't return renamed parents if we aren't following.
        if not follow:
            pl = [p for p in pl if p.path() == f.path()]

        # renamed filectx won't have a filelog yet, so set it
        # from the cache to save time
        for p in pl:
            if not r'_filelog' in p.__dict__:
                p._filelog = getlog(p.path())

        return pl

    # use linkrev to find the first changeset where self appeared
    base = self.introfilectx()
    if getattr(base, '_ancestrycontext', None) is None:
        cl = self._repo.changelog
        if base.rev() is None:
            # wctx is not inclusive, but works because _ancestrycontext
            # is used to test filelog revisions
            ac = cl.ancestors([p.rev() for p in base.parents()],
                              inclusive=True)
        else:
            ac = cl.ancestors([base.rev()], inclusive=True)
        base._ancestrycontext = ac

    return dagop.annotate(base, parents, skiprevs=skiprevs,
                          diffopts=diffopts)
971 971
def ancestors(self, followfirst=False):
    """Yield ancestor filectxs of this file revision.

    When followfirst is true only the first parent of each visited
    context is followed. Candidates are yielded highest
    (linkrev, filenode) key first.
    """
    cut = 1 if followfirst else None
    pending = {}
    ctx = self
    while True:
        for parent in ctx.parents()[:cut]:
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            return
        ctx = pending.pop(max(pending))
        yield ctx
987 987
def decodeddata(self):
    """Return data() passed through the repository decode filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    raw = self.data()
    return self._repo.wwritedata(self.path(), raw)
994 994
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # seed the corresponding propertycaches with whatever was supplied
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw revlog data, without applying flag processors
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor.policy config."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # same file revision exists in a parent: not a copy here
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1100 1100
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # a committable context has no revision/node yet
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            # pre-seed the _status propertycache
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # first parent's id plus '+' marks a dirty/committable context
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # computed lazily when no explicit `changes` was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # no-op hook here; workingctx overrides this with the dirstate update

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1243 1231
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
        or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all state handling lives in committablectx
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1256 1244
def __iter__(self):
    """Iterate over tracked file names, skipping entries the dirstate
    marks as removed ('r')."""
    dirstate = self._repo.dirstate
    for filename in dirstate:
        if dirstate[filename] != 'r':
            yield filename
1262 1250
def __contains__(self, key):
    """True when *key* is tracked and not unknown ('?') or removed ('r')."""
    state = self._repo.dirstate[key]
    return state != '?' and state != 'r'
1265 1253
def hex(self):
    # hex id of the working-directory pseudo node
    return wdirhex
1268 1256
@propertycache
def _parents(self):
    """Parent changectxs of the working directory (one or two)."""
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        # drop the null second parent
        p = p[:-1]
    # use unfiltered repo to delay/avoid loading obsmarkers
    unfi = self._repo.unfiltered()
    return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1277 1265
def _fileinfo(self, path):
    # populate __dict__['_manifest'] as workingctx has no _manifestdelta
    self._manifest
    return super(workingctx, self)._fileinfo(path)
1282 1270
def _buildflagfunc(self):
    """Build a fallback flags(path) function for when the filesystem
    doesn't support exec/symlink flags, reconstructing them from the
    parent manifest(s)."""
    # Create a fallback function for getting file flags when the
    # filesystem doesn't support them

    copiesget = self._repo.dirstate.copies().get
    parents = self.parents()
    if len(parents) < 2:
        # when we have one parent, it's easy: copy from parent
        man = parents[0].manifest()
        def func(f):
            f = copiesget(f, f)
            return man.flags(f)
    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def func(f):
            f = copiesget(f, f) # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            if fl1 == fl2:
                return fl1
            if fl1 == fla:
                return fl2
            if fl2 == fla:
                return fl1
            return '' # punt for conflicts

    return func
1314 1302
@propertycache
def _flagfunc(self):
    # dirstate supplies real flags when possible, falling back to the
    # manifest-based reconstruction above
    return self._repo.dirstate.flagfunc(self._buildflagfunc)
1318 1306
def flags(self, path):
    """Return the flags for *path*, or '' when the path is unknown or
    unreadable."""
    if r'_manifest' in self.__dict__:
        # a status manifest was already built: read flags from it
        manifest = self._manifest
        try:
            return manifest.flags(path)
        except KeyError:
            return ''
    try:
        return self._flagfunc(path)
    except OSError:
        return ''
1330 1318
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1335 1323
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # check subrepos first
    for s in sorted(self.substate):
        if self.sub(s).dirty(missing=missing):
            return True
    # check current working dir: uncommitted merge, branch change,
    # file changes, or (optionally) missing files
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1347 1335
1348 1336 def add(self, list, prefix=""):
1349 1337 with self._repo.wlock():
1350 1338 ui, ds = self._repo.ui, self._repo.dirstate
1351 1339 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1352 1340 rejected = []
1353 1341 lstat = self._repo.wvfs.lstat
1354 1342 for f in list:
1355 1343 # ds.pathto() returns an absolute file when this is invoked from
1356 1344 # the keyword extension. That gets flagged as non-portable on
1357 1345 # Windows, since it contains the drive letter and colon.
1358 1346 scmutil.checkportable(ui, os.path.join(prefix, f))
1359 1347 try:
1360 1348 st = lstat(f)
1361 1349 except OSError:
1362 1350 ui.warn(_("%s does not exist!\n") % uipath(f))
1363 1351 rejected.append(f)
1364 1352 continue
1365 1353 limit = ui.configbytes('ui', 'large-file-limit')
1366 1354 if limit != 0 and st.st_size > limit:
1367 1355 ui.warn(_("%s: up to %d MB of RAM may be required "
1368 1356 "to manage this file\n"
1369 1357 "(use 'hg revert %s' to cancel the "
1370 1358 "pending addition)\n")
1371 1359 % (f, 3 * st.st_size // 1000000, uipath(f)))
1372 1360 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1373 1361 ui.warn(_("%s not added: only files and symlinks "
1374 1362 "supported currently\n") % uipath(f))
1375 1363 rejected.append(f)
1376 1364 elif ds[f] in 'amn':
1377 1365 ui.warn(_("%s already tracked!\n") % uipath(f))
1378 1366 elif ds[f] == 'r':
1379 1367 ds.normallookup(f)
1380 1368 else:
1381 1369 ds.add(f)
1382 1370 return rejected
1383 1371
1384 1372 def forget(self, files, prefix=""):
1385 1373 with self._repo.wlock():
1386 1374 ds = self._repo.dirstate
1387 1375 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1388 1376 rejected = []
1389 1377 for f in files:
1390 1378 if f not in ds:
1391 1379 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1392 1380 rejected.append(f)
1393 1381 elif ds[f] != 'a':
1394 1382 ds.remove(f)
1395 1383 else:
1396 1384 ds.drop(f)
1397 1385 return rejected
1398 1386
def copy(self, source, dest):
    """Record that *dest* is a copy of *source* in the dirstate."""
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n")
                           % self._repo.dirstate.pathto(dest))
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n")
                           % self._repo.dirstate.pathto(dest))
    else:
        with self._repo.wlock():
            ds = self._repo.dirstate
            if ds[dest] in '?':
                # destination untracked: start tracking it
                ds.add(dest)
            elif ds[dest] in 'r':
                # destination was removed: resurrect it
                ds.normallookup(dest)
            ds.copy(source, dest)
1420 1408
def match(self, pats=None, include=None, exclude=None, default='glob',
          listsubrepos=False, badfn=None):
    """Build a matcher for the working directory."""
    r = self._repo

    # Only a case insensitive filesystem needs magic to translate user input
    # to actual case in the filesystem.
    icasefs = not util.fscasesensitive(r.root)
    return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                          default, auditor=r.auditor, ctx=self,
                          listsubrepos=listsubrepos, badfn=badfn,
                          icasefs=icasefs)
1432 1420
def _filtersuspectsymlink(self, files):
    """Drop files flagged as symlinks whose content can't be a link target."""
    if not files or self._repo.dirstate._checklink:
        # filesystem supports symlinks: nothing to filter
        return files

    # Symlink placeholders may get non-symlink-like contents
    # via user error or dereferencing by NFS or Samba servers,
    # so we filter out any placeholders that don't look like a
    # symlink
    sane = []
    for f in files:
        if self.flags(f) == 'l':
            d = self[f].data()
            if (d == '' or len(d) >= 1024 or '\n' in d
                or stringutil.binary(d)):
                self._repo.ui.debug('ignoring suspect symlink placeholder'
                                    ' "%s"\n' % f)
                continue
        sane.append(f)
    return sane
1452 1440
def _checklookup(self, files):
    """Classify possibly-clean *files* into (modified, deleted, fixup).

    fixup collects files whose content is unchanged and whose dirstate
    entry can therefore be refreshed.
    """
    # check for any possibly clean files
    if not files:
        return [], [], []

    modified = []
    deleted = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        try:
            # This will return True for a file that got replaced by a
            # directory in the interim, but fixing that is pretty hard.
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)
        except (IOError, OSError):
            # A file become inaccessible in between? Mark it as deleted,
            # matching dirstate behavior (issue5584).
            # The dirstate has more complex behavior around whether a
            # missing file matches a directory, etc, but we don't need to
            # bother with that: if f has made it to this point, we're sure
            # it's in the dirstate.
            deleted.append(f)

    return modified, deleted, fixup
1482 1470
def _poststatusfixup(self, status, fixup):
    """update dirstate for files that are actually clean"""
    poststatus = self._repo.postdsstatus()
    if fixup or poststatus:
        try:
            # dirstate identity guards against concurrent rewrites
            oldid = self._repo.dirstate.identity()

            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            with self._repo.wlock(False):
                if self._repo.dirstate.identity() == oldid:
                    if fixup:
                        normal = self._repo.dirstate.normal
                        for f in fixup:
                            normal(f)
                        # write changes out explicitly, because nesting
                        # wlock at runtime may prevent 'wlock.release()'
                        # after this block from doing so for subsequent
                        # changing files
                        tr = self._repo.currenttransaction()
                        self._repo.dirstate.write(tr)

                    if poststatus:
                        for ps in poststatus:
                            ps(self, status)
                else:
                    # in this case, writing changes out breaks
                    # consistency, because .hg/dirstate was
                    # already changed simultaneously after last
                    # caching (see also issue5584 for detail)
                    self._repo.ui.debug('skip updating dirstate: '
                                        'identity mismatch\n')
        except error.LockError:
            # best-effort: a busy wlock means we simply skip the fixup
            pass
        finally:
            # Even if the wlock couldn't be grabbed, clear out the list.
            self._repo.clearpostdsstatus()
1522 1510
def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
    '''Gets the status from the dirstate -- internal use only.'''
    subrepos = []
    if '.hgsub' in self:
        subrepos = sorted(self.substate)
    cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                        clean=clean, unknown=unknown)

    # check for any possibly clean files
    fixup = []
    if cmp:
        modified2, deleted2, fixup = self._checklookup(cmp)
        s.modified.extend(modified2)
        s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

    self._poststatusfixup(s, fixup)

    if match.always():
        # cache for performance
        if s.unknown or s.ignored or s.clean:
            # "_status" is cached with list*=False in the normal route
            self._status = scmutil.status(s.modified, s.added, s.removed,
                                          s.deleted, [], [], [])
        else:
            self._status = s

    return s
1553 1541
@propertycache
def _copies(self):
    """(p1copies, p2copies) dicts of {dst: src} from the dirstate,
    split by which parent manifest contains the source."""
    p1copies = {}
    p2copies = {}
    parents = self._repo.dirstate.parents()
    p1manifest = self._repo[parents[0]].manifest()
    p2manifest = self._repo[parents[1]].manifest()
    narrowmatch = self._repo.narrowmatch()
    for dst, src in self._repo.dirstate.copies().items():
        if not narrowmatch(dst):
            # ignore copies outside the narrowspec
            continue
        if src in p1manifest:
            p1copies[dst] = src
        elif src in p2manifest:
            p2copies[dst] = src
    return p1copies, p2copies
def p1copies(self):
    """Return the {dst: src} copies relative to the first parent."""
    copies_p1, _copies_p2 = self._copies
    return copies_p1
def p2copies(self):
    """Return the {dst: src} copies relative to the second parent."""
    _copies_p1, copies_p2 = self._copies
    return copies_p2
1574 1562
@propertycache
def _manifest(self):
    """generate a manifest corresponding to the values in self._status

    This reuse the file nodeid from parent, but we use special node
    identifiers for added and modified files. This is used by manifests
    merge to see that files are different and by update logic to avoid
    deleting newly added files.
    """
    return self._buildstatusmanifest(self._status)
1585 1573
def _buildstatusmanifest(self, status):
    """Builds a manifest that includes the given status results."""
    parents = self.parents()

    # start from the first parent's manifest
    man = parents[0].manifest().copy()

    ff = self._flagfunc
    # added/modified entries get sentinel node ids
    for i, l in ((addednodeid, status.added),
                 (modifiednodeid, status.modified)):
        for f in l:
            man[f] = i
            try:
                man.setflag(f, ff(f))
            except OSError:
                pass

    for f in status.deleted + status.removed:
        if f in man:
            del man[f]

    return man
1607 1595
def _buildstatus(self, other, s, match, listignored, listclean,
                 listunknown):
    """build a status with respect to another context

    This includes logic for maintaining the fast path of status when
    comparing the working directory against its parent, which is to skip
    building a new manifest if self (working directory) is not comparing
    against its parent (repo['.']).
    """
    s = self._dirstatestatus(match, listignored, listclean, listunknown)
    # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
    # might have accidentally ended up with the entire contents of the file
    # they are supposed to be linking to.
    s.modified[:] = self._filtersuspectsymlink(s.modified)
    if other != self._repo['.']:
        # slow path: delegate the manifest-based comparison to basectx
        s = super(workingctx, self)._buildstatus(other, s, match,
                                                 listignored, listclean,
                                                 listunknown)
    return s
1627 1615
def _matchstatus(self, other, match):
    """override the match method with a filter for directory patterns

    We use inheritance to customize the match.bad method only in cases of
    workingctx since it belongs only to the working directory when
    comparing against the parent changeset.

    If we aren't comparing against the working directory's parent, then we
    just use the default match object sent to us.
    """
    if other != self._repo['.']:
        def bad(f, msg):
            # 'f' may be a directory pattern from 'match.files()',
            # so 'f not in ctx1' is not enough
            if f not in other and not other.hasdir(f):
                self._repo.ui.warn('%s: %s\n' %
                                   (self._repo.dirstate.pathto(f), msg))
        match.bad = bad
    return match
1647 1635
def walk(self, match):
    '''Generates matching file names.'''
    narrowed = self._repo.narrowmatch(match)
    files = self._repo.dirstate.walk(narrowed,
                                     subrepos=sorted(self.substate),
                                     unknown=True, ignored=False)
    return sorted(files)
1653 1641
def matches(self, match):
    """Return sorted names of matched dirstate files, excluding entries
    marked removed ('r')."""
    match = self._repo.narrowmatch(match)
    ds = self._repo.dirstate
    names = [f for f in ds.matches(match) if ds[f] != 'r']
    names.sort()
    return names
1658 1646
def markcommitted(self, node):
    """Update the dirstate after this context was committed as *node*.

    Marks modified/added files clean, drops removed ones, and moves the
    dirstate parent to the new node.
    """
    with self._repo.dirstate.parentchange():
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)

    # write changes out explicitly, because nesting wlock at
    # runtime may prevent 'wlock.release()' in 'repo.commit()'
    # from immediately doing so for subsequent changing files
    self._repo.dirstate.write(self._repo.currenttransaction())

    sparse.aftercommit(self._repo, node)
1663 1661
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # uncommitted: no changeset or file revision identifiers yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) or None if not a copy."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent comes from the rename source (no filelog yet)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1716 1714
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read straight from the working directory
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """File mtime with the changectx's timezone; falls back to the
        changectx date when the file is gone."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for tracked states (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1801 1799
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay records changes on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return the cached data for ``path``, falling back to the wrapped
        context when only flags were changed or the path is clean."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty, still exists, and present in the parent -> modified.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # Dirty, still exists, but absent from the parent -> added.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # Dirty, deleted, but present in the parent -> removed.
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def p1copies(self):
        # Fix: the wrapped context is stored on *self* by setbase();
        # ``self._repo._wrappedctx`` raised AttributeError since the repo
        # has no such attribute.
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # Fix: same as p1copies() -- read the base copies from
        # self._wrappedctx, not self._repo.
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Fix: report the requested ``path``; ``self._path`` does not
                # exist on this class (it is a filectx attribute).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Fix: report the requested ``path``; ``self._path`` does not
                # exist on this class (it is a filectx attribute).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2139 2137
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's data differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    # Everything below delegates to the owning overlayworkingctx, keyed
    # by this file's path.

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit: the overlay never touches the filesystem.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No on-disk state can conflict with an in-memory write.
        pass
2195 2193
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        st = self._status
        return scmutil.status([f for f in st.modified if match(f)],
                              [f for f in st.added if match(f)],
                              [f for f in st.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        st = self._status
        return set(st.modified) | set(st.added) | set(st.removed)
2231 2229
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # Only the path participates in the cache key; ``func`` is invoked
        # at most once per path.
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2247 2245
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        srcfctx = ctx[path]
        return memfilectx(repo, memctx, path, srcfctx.data(),
                          islink=srcfctx.islink(),
                          isexec=srcfctx.isexec(),
                          copysource=srcfctx.copysource())

    return getfilectx
2262 2260
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # The patch deletes this file.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copysource=copysource)

    return getfilectx
2277 2275
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision metadata (parents, message, user, date, extra) is supplied at
    construction time; file contents are produced lazily through the
    ``filectxfn`` callback. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists names
    of files touched by the revision (normalized and relative to repository
    root).

    filectxfn(repo, memctx, path) is a callable receiving the repository,
    the current memctx object and the normalized path of the requested
    file. It is fired by the commit function for every file in 'files', in
    undefined order. For a file present in the revision (updated or added)
    it returns a memfilectx object; for a removed file it returns None.
    Moved files are represented by marking the source file removed and the
    new file added with copy information (see memfilectx).

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a
    dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Exactly two parent identifiers are expected; missing ones are
        # normalized to the null revision.
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [self._repo[p] for p in (p1, p2)]
        self._files = sorted(set(files))
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""
        # keep this simple for now; just worry about p1
        mf = self._parents[0].manifest().copy()

        for f in self._status.modified:
            mf[f] = modifiednodeid
        for f in self._status.added:
            mf[f] = addednodeid
        for f in self._status.removed:
            if f in mf:
                del mf[f]

        return mf

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2393 2391
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the link/exec bits the same way manifests do.
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's data differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2436 2434
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the revision whose manifest we are
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary
    of metadata or is left empty.
    """

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Pad with null-revision contexts so there are always two parents.
        parents = list(parents)
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File data always comes from the original revision.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2531 2529
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file differs from ``fctx``."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the disk-backed fast path when neither side is a symlink.
        anylink = 'l' in self.flags() or 'l' in fctx.flags()
        if not anylink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast path for merge when both sides are disk-backed.  Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no link/exec metadata here.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,78 +1,78 b''
1 1 # extension to emulate invoking 'dirstate.write()' at the time
2 2 # specified by '[fakedirstatewritetime] fakenow', only when
3 3 # 'dirstate.write()' is invoked via functions below:
4 4 #
5 5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 6 # - 'committablectx.markcommitted()'
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 context,
12 12 dirstate,
13 13 extensions,
14 14 policy,
15 15 registrar,
16 16 )
17 17 from mercurial.utils import dateutil
18 18
# Extension boilerplate: declare the single config option this extension
# reads, '[fakedirstatewritetime] fakenow' (unset by default).
configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'fakedirstatewritetime', b'fakenow',
    default=None,
)

# dirstate parsers module resolved through the policy layer; its
# pack_dirstate attribute is temporarily replaced by fakewrite() below.
parsers = policy.importmod(r'parsers')
27 27
def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
    """Invoke the original pack_dirstate with 'now' replaced by fakenow.

    First mirrors, for consistency, what the real parsers.pack_dirstate
    does with the genuine 'now': entries in state 'n' whose mtime equals
    int(now) get their mtime invalidated (set to -1) in place.
    """
    truncatednow = int(now)
    for path, ent in dmap.items():
        if ent[0] == 'n' and ent[3] == truncatednow:
            dmap[path] = parsers.dirstatetuple(ent[0], ent[1], ent[2], -1)
    return orig(dmap, copymap, pl, fakenow)
38 38
def fakewrite(ui, func):
    """Run func() with dirstate time sources patched to a fake 'now'.

    The fake timestamp comes from '[fakedirstatewritetime] fakenow'; when
    it is not configured, func() runs completely unpatched.
    """
    fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
    if not fakenow:
        # Execute original one, if fakenow isn't configured. This is
        # useful to prevent subrepos from executing replaced one,
        # because replacing 'parsers.pack_dirstate' is also effective
        # in subrepos.
        return func()

    # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
    # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
    fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]

    saved_pack = parsers.pack_dirstate
    saved_getfsnow = dirstate._getfsnow

    parsers.pack_dirstate = lambda *args: pack_dirstate(fakenow, saved_pack,
                                                        *args)
    dirstate._getfsnow = lambda *args: fakenow
    try:
        return func()
    finally:
        # Always restore the real implementations, even if func() raised.
        parsers.pack_dirstate = saved_pack
        dirstate._getfsnow = saved_getfsnow
65 65
def _poststatusfixup(orig, workingctx, status, fixup):
    # Wrapper for workingctx._poststatusfixup (reached via repo.status()):
    # run the original under fakewrite() so any dirstate.write() it
    # triggers uses the configured fake timestamp.
    ui = workingctx.repo().ui
    return fakewrite(ui, lambda : orig(workingctx, status, fixup))
69 69
def markcommitted(orig, committablectx, node):
    # Wrapper for markcommitted: run the original under fakewrite() so the
    # dirstate write performed at commit time uses the fake timestamp.
    ui = committablectx.repo().ui
    return fakewrite(ui, lambda : orig(committablectx, node))
73 73
def extsetup(ui):
    # Hook both code paths that write the dirstate: status fixups and
    # commit finalization (both now live on workingctx, per this change).
    extensions.wrapfunction(context.workingctx, '_poststatusfixup',
                            _poststatusfixup)
    extensions.wrapfunction(context.workingctx, 'markcommitted',
                            markcommitted)
@@ -1,78 +1,78 b''
1 1 $ hg init test-content
2 2 $ cd test-content
3 3 $ hg debugbuilddag '+2*2*3*4+7'
4 4 $ hg bookmark -r 1 @
5 5 $ hg log -G --template '{rev}:{node|short}'
6 6 o 11:1d876b1f862c
7 7 |
8 8 o 10:ea5f71948eb8
9 9 |
10 10 o 9:f1b0356d867a
11 11 |
12 12 o 8:e8d1253fb0d7
13 13 |
14 14 o 7:d423bbba4459
15 15 |
16 16 o 6:a2f58e9c1e56
17 17 |
18 18 o 5:3a367db1fabc
19 19 |
20 20 o 4:e7bd5218ca15
21 21 |
22 22 | o 3:6100d3090acf
23 23 |/
24 24 | o 2:fa942426a6fd
25 25 |/
26 26 | o 1:66f7d451a68b
27 27 |/
28 28 o 0:1ea73414a91b
29 29
30 30 $ hg --config extensions.closehead= close-head -m 'Not a head' 0 1
31 31 abort: revision is not an open head: 0
32 32 [255]
33 33 $ hg --config extensions.closehead= close-head -m 'Not a head' -r 0 1
34 34 abort: revision is not an open head: 0
35 35 [255]
36 36 $ hg id
37 37 000000000000
38 38 $ hg --config extensions.closehead= close-head -m 'Close old heads' -r 1 2
39 39 $ hg id
40 340d36cac2f4 tip
40 000000000000
41 41 $ hg bookmark
42 42 @ 1:66f7d451a68b
43 43 $ hg heads
44 44 changeset: 11:1d876b1f862c
45 45 user: debugbuilddag
46 46 date: Thu Jan 01 00:00:11 1970 +0000
47 47 summary: r11
48 48
49 49 changeset: 3:6100d3090acf
50 50 parent: 0:1ea73414a91b
51 51 user: debugbuilddag
52 52 date: Thu Jan 01 00:00:03 1970 +0000
53 53 summary: r3
54 54
55 55 $ hg --config extensions.closehead= close-head -m 'Close more old heads' -r 11
56 56 $ hg heads
57 57 changeset: 3:6100d3090acf
58 58 parent: 0:1ea73414a91b
59 59 user: debugbuilddag
60 60 date: Thu Jan 01 00:00:03 1970 +0000
61 61 summary: r3
62 62
63 63 $ hg --config extensions.closehead= close-head -m 'Not a head' 0
64 64 abort: revision is not an open head: 0
65 65 [255]
66 66 $ hg --config extensions.closehead= close-head -m 'Already closed head' 1
67 67 abort: revision is not an open head: 1
68 68 [255]
69 69
70 70 $ hg init ../test-empty
71 71 $ cd ../test-empty
72 72 $ hg debugbuilddag '+1'
73 73 $ hg log -G --template '{rev}:{node|short}'
74 74 o 0:1ea73414a91b
75 75
76 76 $ hg --config extensions.closehead= close-head -m 'Close initial revision' 0
77 77 $ hg heads
78 78 [1]
General Comments 0
You need to be logged in to leave comments. Login now