##// END OF EJS Templates
context: filter out invalid copies from workingctx.p[12]copies()...
Martin von Zweigbergk -
r43125:2b869a51 default
parent child Browse files
Show More
@@ -1,2579 +1,2580 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 copies,
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        # Subclasses are responsible for setting _rev/_node/_manifest etc.
        self._repo = repo

    def __bytes__(self):
        # Short hex form of the node, e.g. "1234567890ab".
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even if
        # they refer to the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "this context tracks file `key`".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> filectx for that path in this context.
        return self.filectx(key)

    def __iter__(self):
        # Iterate over tracked file names.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        `s` is a pre-computed scmutil.status (deleted/unknown/ignored come
        from it); returns a new scmutil.status comparing `other` to `self`.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        # diff() maps fn -> ((node1, flag1), (node2, flag2)), or None for
        # clean entries (only present when listclean is requested).
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # Deleted files are reported separately; never double-count.
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this context (cached).
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded in .hgsubstate for the given subrepo path.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # Human-readable phase name ('public', 'draft', 'secret', ...).
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the public phase may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        """Return a matcher for the given fileset expression in this context."""
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when this is not a merge.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for `path`, raising ManifestLookupError
        if the file is not tracked in this context.

        Prefers whichever manifest representation is already cached (full
        manifest, then manifest delta) before falling back to a targeted
        find() on the manifest revlog.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files have empty flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from filelog metadata (cached).
        return copies.computechangesetcopies(self)
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for the given patterns, rooted at this context."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, pathfn=None, copy=None,
             copysourcematch=None, hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, pathfn=pathfn,
                          copy=copy, copysourcematch=copysourcematch,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # Prefix subrepo results with "<subpath>/" and merge them
                # into the corresponding top-level status lists.
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        # Hash on the revision number when available; fall back to identity
        # for contexts constructed without one.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (cached).
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read when
        # only the files touched by this changeset are needed.
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # List of parent contexts; a single-element list for non-merges.
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        # Raw changeset tuple: (manifest, user, date, files, description,
        # extra).
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def filesmodified(self):
        # Modified = touched files minus those added or removed.
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)
    def filesadded(self):
        # Honor experimental.copies.read-from: use changeset-stored data when
        # configured (or available in compatibility mode), otherwise derive
        # the added files from the filelogs.
        source = self._repo.ui.config('experimental', 'copies.read-from')
        if (source == 'changeset-only' or
            (source == 'compatibility' and
             self._changeset.filesadded is not None)):
            return self._changeset.filesadded or []
        return scmutil.computechangesetfilesadded(self)
    def filesremoved(self):
        # Same config-driven fallback logic as filesadded().
        source = self._repo.ui.config('experimental', 'copies.read-from')
        if (source == 'changeset-only' or
            (source == 'compatibility' and
             self._changeset.filesremoved is not None)):
            return self._changeset.filesremoved or []
        return scmutil.computechangesetfilesremoved(self)

    @propertycache
    def _copies(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if (source == 'changeset-only' or
            (source == 'compatibility' and p1copies is not None)):
            return p1copies or {}, p2copies or {}

        # Otherwise (config said to read only from filelog, or we are in
        # compatiblity mode and there is not data in the changeset), we get
        # the copy metadata from the filelogs.
        return super(changectx, self)._copies
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # True when this revision is filtered out of the 'visible' view.
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # Lazily yield a context for every ancestor revision.
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # No configured preference matched; use the revlog's pick.
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
598 598 class basefilectx(object):
599 599 """A filecontext object represents the common logic for its children:
600 600 filectx: read-only access to a filerevision that is already present
601 601 in the repo,
602 602 workingfilectx: a filecontext that represents files from the working
603 603 directory,
604 604 memfilectx: a filecontext that represents files in-memory,
605 605 """
606 606 @propertycache
607 607 def _filelog(self):
608 608 return self._repo.file(self._path)
609 609
610 610 @propertycache
611 611 def _changeid(self):
612 612 if r'_changectx' in self.__dict__:
613 613 return self._changectx.rev()
614 614 elif r'_descendantrev' in self.__dict__:
615 615 # this file context was created from a revision with a known
616 616 # descendant, we can (lazily) correct for linkrev aliases
617 617 return self._adjustlinkrev(self._descendantrev)
618 618 else:
619 619 return self._filelog.linkrev(self._filerev)
620 620
621 621 @propertycache
622 622 def _filenode(self):
623 623 if r'_fileid' in self.__dict__:
624 624 return self._filelog.lookup(self._fileid)
625 625 else:
626 626 return self._changectx.filenode(self._path)
627 627
628 628 @propertycache
629 629 def _filerev(self):
630 630 return self._filelog.rev(self._filenode)
631 631
632 632 @propertycache
633 633 def _repopath(self):
634 634 return self._path
635 635
636 636 def __nonzero__(self):
637 637 try:
638 638 self._filenode
639 639 return True
640 640 except error.LookupError:
641 641 # file is missing
642 642 return False
643 643
644 644 __bool__ = __nonzero__
645 645
646 646 def __bytes__(self):
647 647 try:
648 648 return "%s@%s" % (self.path(), self._changectx)
649 649 except error.LookupError:
650 650 return "%s@???" % self.path()
651 651
652 652 __str__ = encoding.strmethod(__bytes__)
653 653
654 654 def __repr__(self):
655 655 return r"<%s %s>" % (type(self).__name__, str(self))
656 656
657 657 def __hash__(self):
658 658 try:
659 659 return hash((self._path, self._filenode))
660 660 except AttributeError:
661 661 return id(self)
662 662
663 663 def __eq__(self, other):
664 664 try:
665 665 return (type(self) == type(other) and self._path == other._path
666 666 and self._filenode == other._filenode)
667 667 except AttributeError:
668 668 return False
669 669
670 670 def __ne__(self, other):
671 671 return not (self == other)
672 672
673 673 def filerev(self):
674 674 return self._filerev
675 675 def filenode(self):
676 676 return self._filenode
677 677 @propertycache
678 678 def _flags(self):
679 679 return self._changectx.flags(self._path)
680 680 def flags(self):
681 681 return self._flags
682 682 def filelog(self):
683 683 return self._filelog
684 684 def rev(self):
685 685 return self._changeid
686 686 def linkrev(self):
687 687 return self._filelog.linkrev(self._filerev)
688 688 def node(self):
689 689 return self._changectx.node()
690 690 def hex(self):
691 691 return self._changectx.hex()
692 692 def user(self):
693 693 return self._changectx.user()
694 694 def date(self):
695 695 return self._changectx.date()
696 696 def files(self):
697 697 return self._changectx.files()
698 698 def description(self):
699 699 return self._changectx.description()
700 700 def branch(self):
701 701 return self._changectx.branch()
702 702 def extra(self):
703 703 return self._changectx.extra()
704 704 def phase(self):
705 705 return self._changectx.phase()
706 706 def phasestr(self):
707 707 return self._changectx.phasestr()
708 708 def obsolete(self):
709 709 return self._changectx.obsolete()
710 710 def instabilities(self):
711 711 return self._changectx.instabilities()
712 712 def manifest(self):
713 713 return self._changectx.manifest()
714 714 def changectx(self):
715 715 return self._changectx
716 716 def renamed(self):
717 717 return self._copied
718 718 def copysource(self):
719 719 return self._copied and self._copied[0]
720 720 def repo(self):
721 721 return self._repo
722 722 def size(self):
723 723 return len(self.data())
724 724
725 725 def path(self):
726 726 return self._path
727 727
728 728 def isbinary(self):
729 729 try:
730 730 return stringutil.binary(self.data())
731 731 except IOError:
732 732 return False
733 733 def isexec(self):
734 734 return 'x' in self.flags()
735 735 def islink(self):
736 736 return 'l' in self.flags()
737 737
738 738 def isabsent(self):
739 739 """whether this filectx represents a file not in self._changectx
740 740
741 741 This is mainly for merge code to detect change/delete conflicts. This is
742 742 expected to be True for all subclasses of basectx."""
743 743 return False
744 744
745 745 _customcmp = False
746 746 def cmp(self, fctx):
747 747 """compare with other file context
748 748
749 749 returns True if different than fctx.
750 750 """
751 751 if fctx._customcmp:
752 752 return fctx.cmp(self)
753 753
754 754 if self._filenode is None:
755 755 raise error.ProgrammingError(
756 756 'filectx.cmp() must be reimplemented if not backed by revlog')
757 757
758 758 if fctx._filenode is None:
759 759 if self._repo._encodefilterpats:
760 760 # can't rely on size() because wdir content may be decoded
761 761 return self._filelog.cmp(self._filenode, fctx.data())
762 762 if self.size() - 4 == fctx.size():
763 763 # size() can match:
764 764 # if file data starts with '\1\n', empty metadata block is
765 765 # prepended, which adds 4 bytes to filelog.size().
766 766 return self._filelog.cmp(self._filenode, fctx.data())
767 767 if self.size() == fctx.size():
768 768 # size() matches: need to compare content
769 769 return self._filelog.cmp(self._filenode, fctx.data())
770 770
771 771 # size() differs
772 772 return True
773 773
774 774 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
775 775 """return the first ancestor of <srcrev> introducing <fnode>
776 776
777 777 If the linkrev of the file revision does not point to an ancestor of
778 778 srcrev, we'll walk down the ancestors until we find one introducing
779 779 this file revision.
780 780
781 781 :srcrev: the changeset revision we search ancestors from
782 782 :inclusive: if true, the src revision will also be checked
783 783 :stoprev: an optional revision to stop the walk at. If no introduction
784 784 of this file content could be found before this floor
785 785 revision, the function will returns "None" and stops its
786 786 iteration.
787 787 """
788 788 repo = self._repo
789 789 cl = repo.unfiltered().changelog
790 790 mfl = repo.manifestlog
791 791 # fetch the linkrev
792 792 lkr = self.linkrev()
793 793 if srcrev == lkr:
794 794 return lkr
795 795 # hack to reuse ancestor computation when searching for renames
796 796 memberanc = getattr(self, '_ancestrycontext', None)
797 797 iteranc = None
798 798 if srcrev is None:
799 799 # wctx case, used by workingfilectx during mergecopy
800 800 revs = [p.rev() for p in self._repo[None].parents()]
801 801 inclusive = True # we skipped the real (revless) source
802 802 else:
803 803 revs = [srcrev]
804 804 if memberanc is None:
805 805 memberanc = iteranc = cl.ancestors(revs, lkr,
806 806 inclusive=inclusive)
807 807 # check if this linkrev is an ancestor of srcrev
808 808 if lkr not in memberanc:
809 809 if iteranc is None:
810 810 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
811 811 fnode = self._filenode
812 812 path = self._path
813 813 for a in iteranc:
814 814 if stoprev is not None and a < stoprev:
815 815 return None
816 816 ac = cl.read(a) # get changeset data (we avoid object creation)
817 817 if path in ac[3]: # checking the 'files' field.
818 818 # The file has been touched, check if the content is
819 819 # similar to the one we search for.
820 820 if fnode == mfl[ac[0]].readfast().get(path):
821 821 return a
822 822 # In theory, we should never get out of that loop without a result.
823 823 # But if manifest uses a buggy file revision (not children of the
824 824 # one it replaces) we could. Such a buggy situation will likely
825 825 # result is crash somewhere else at to some point.
826 826 return lkr
827 827
828 828 def isintroducedafter(self, changelogrev):
829 829 """True if a filectx has been introduced after a given floor revision
830 830 """
831 831 if self.linkrev() >= changelogrev:
832 832 return True
833 833 introrev = self._introrev(stoprev=changelogrev)
834 834 if introrev is None:
835 835 return False
836 836 return introrev >= changelogrev
837 837
838 838 def introrev(self):
839 839 """return the rev of the changeset which introduced this file revision
840 840
841 841 This method is different from linkrev because it take into account the
842 842 changeset the filectx was created from. It ensures the returned
843 843 revision is one of its ancestors. This prevents bugs from
844 844 'linkrev-shadowing' when a file revision is used by multiple
845 845 changesets.
846 846 """
847 847 return self._introrev()
848 848
849 849 def _introrev(self, stoprev=None):
850 850 """
851 851 Same as `introrev` but, with an extra argument to limit changelog
852 852 iteration range in some internal usecase.
853 853
854 854 If `stoprev` is set, the `introrev` will not be searched past that
855 855 `stoprev` revision and "None" might be returned. This is useful to
856 856 limit the iteration range.
857 857 """
858 858 toprev = None
859 859 attrs = vars(self)
860 860 if r'_changeid' in attrs:
861 861 # We have a cached value already
862 862 toprev = self._changeid
863 863 elif r'_changectx' in attrs:
864 864 # We know which changelog entry we are coming from
865 865 toprev = self._changectx.rev()
866 866
867 867 if toprev is not None:
868 868 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
869 869 elif r'_descendantrev' in attrs:
870 870 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
871 871 # be nice and cache the result of the computation
872 872 if introrev is not None:
873 873 self._changeid = introrev
874 874 return introrev
875 875 else:
876 876 return self.linkrev()
877 877
878 878 def introfilectx(self):
879 879 """Return filectx having identical contents, but pointing to the
880 880 changeset revision where this filectx was introduced"""
881 881 introrev = self.introrev()
882 882 if self.rev() == introrev:
883 883 return self
884 884 return self.filectx(self.filenode(), changeid=introrev)
885 885
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()

        Builds a plain filectx for (path, fileid) and then copies enough
        ancestry bookkeeping (_descendantrev / _ancestrycontext) onto it so
        that a later _adjustlinkrev() call on the parent can resolve a
        correct linkrev instead of a possibly-wrong stored one.
        """
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
901 901
    def parents(self):
        """Return the parent filectxs of this file revision.

        Parents come from the filelog; nullid parents are dropped.  When the
        file revision records a rename, the rename source replaces the first
        (implicitly null) parent slot.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have already been filtered out in the list
            # comprehension above, inserting at index 0 always amounts to
            # "replace the first nullid parent with the rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
921 921
922 922 def p1(self):
923 923 return self.parents()[0]
924 924
925 925 def p2(self):
926 926 p = self.parents()
927 927 if len(p) == 2:
928 928 return p[1]
929 929 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
930 930
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)

        ``follow`` also traces lines across renames; ``skiprevs`` is a set of
        revisions whose attribution is skipped; ``diffopts`` tunes the diff
        used for line matching.
        """
        # Per-path filelog cache shared by all parent lookups below.
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
976 976
    def ancestors(self, followfirst=False):
        """Generate ancestor filectxs of this file revision.

        With ``followfirst`` only the first parent of each visited context is
        followed.  Candidates are kept in ``visit`` keyed by
        (linkrev, filenode) and yielded highest-key first, so ancestors come
        out in (roughly) newest-first order without duplicates.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
992 992
993 993 def decodeddata(self):
994 994 """Returns `data()` after running repository decoding filters.
995 995
996 996 This is often equivalent to how the data would be expressed on disk.
997 997 """
998 998 return self._repo.wwritedata(self.path(), self.data())
999 999
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one anchor (changeid, fileid or changectx) is required to
        # resolve which file revision this context refers to.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # Only seed the caches that were explicitly provided; the rest are
        # computed lazily via propertycache.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Revlog data without decoding (flags untouched).
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the (decoded) contents of this file revision.

        Censored nodes either read as empty (censor.policy=ignore) or abort.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size as recorded by the filelog for this file revision.
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # If either changeset parent already carries this exact file node,
        # the rename was introduced earlier, not in this changeset.
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1105 1105
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # Only set the attributes that were explicitly given; unset ones are
        # filled in lazily by the propertycaches below.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        if not self._extra.get('branch'):
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # Rendered as "<p1 short hash>+" to mark an uncommitted context.
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # Default status: full repository status vs. the working directory.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin a deterministic date.
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    # Aliases matching the changectx files* accessor names.
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # Union of the bookmarks of all parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        # A commit can never be in a lower phase than any of its parents.
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        # Parents first, then all their changelog ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1236 1236
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        branch = None
        # Fall back to the dirstate branch when the caller did not pin one
        # through extra['branch'].
        if not extra or 'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
        super(workingctx, self).__init__(repo, text, user, date, extra, changes,
                                         branch=branch)

    def __iter__(self):
        # Iterate tracked files, skipping those marked for removal ('r').
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # A file is "in" the working context unless it is untracked ('?')
        # or marked for removal ('r').
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        # Drop the second parent when it is null (no merge in progress).
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        # Prefer the status-built manifest when it has been materialized.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition; return the rejected ones."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # Previously removed: re-adding restores normal tracking.
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files; return the rejected ones."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    # Scheduled for add but never committed: just drop it.
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` is a copy of ``source``."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """Re-examine possibly-clean files; return (modified, deleted, fixup).

        ``fixup`` lists files that turned out to be clean and whose dirstate
        entries can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        """Return (p1copies, p2copies): dirstate copies split per parent.

        Copies whose destination is not actually added/modified in the
        working directory (stale entries) or falls outside the narrowspec
        are filtered out; the remainder are attributed to whichever parent
        manifest contains the copy source.
        """
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # Added/modified files get sentinel node ids so they compare as
        # different from any stored revision.
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        # Exclude files marked for removal ('r').
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1669 1670
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file is a copy,
        else None."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        # First parent slot: the copy source if renamed, otherwise the file
        # in the first changeset parent. The trailing None filelog for the
        # renamed case is resolved later by _parentfilectx.
        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Drop parents whose filenode is null (file absent in that parent).
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1722 1723
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # Read through wread so repo decode filters apply.
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def lstat(self):
        return self._repo.wvfs.lstat(self._path)
    def date(self):
        """Return (mtime, tzoffset); falls back to the changectx date when
        the file is missing from the working directory."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(self._path, data, flags,
                                 backgroundclose=backgroundclose,
                                 **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # 'l' = symlink flag, 'x' = executable flag.
        self._repo.wvfs.setflags(self._path, l, x)
1808 1809
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or replace) the underlying context wrapped by this overlay."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return the content of ``path``, preferring dirty cache data and
        falling back to the wrapped context for clean or flag-only changes."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Parent manifest plus this overlay's adds/modifications/removals.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty files that also exist in the parent.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # Dirty files that do not exist in the parent.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # Files deleted in the overlay that still exist in the parent.
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def _copies(self, basecopies):
        """Return ``basecopies`` (a copies dict inherited from the wrapped
        context) overlaid with the copy records in this context's cache.

        Any file touched by the overlay loses its inherited record and gains
        the overlay's own record, if one was set via ``markcopied()``.  Files
        outside the narrowspec keep their inherited record untouched.
        """
        copies = basecopies.copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p1copies(self):
        # The wrapped context is stored on self by setbase(); reading it via
        # self._repo (as the previous code did) raised AttributeError.
        return self._copies(self._wrappedctx.p1copies())

    def p2copies(self):
        return self._copies(self._wrappedctx.p2copies())

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` is a copy of ``origin``."""
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        """Return the recorded copy source for ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # ``path``, not ``self._path``: this context class has no
                # ``_path`` attribute, so the old message itself crashed.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # ``path``, not ``self._path`` (no such attribute here).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                    underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2148 2149
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; every operation below
        # delegates to it, keyed by this file's path.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Content-only comparison; flags are not considered.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # Same as lexists for this in-memory representation.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        return self._parent.markcopied(self._path, origin)

    def audit(self):
        # No on-disk path to audit: writes stay in memory.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is irrelevant in memory and is intentionally dropped.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory write.
        pass
2204 2205
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        s = self._status
        if clean:
            # anything in the manifest that this context doesn't touch
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        return scmutil.status([f for f in s.modified if match(f)],
                              [f for f in s.added if match(f)],
                              [f for f in s.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
2240 2241
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2256 2257
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # capture the copy source before reading data, matching the
        # original evaluation order
        source = fctx.copysource()
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(),
                          isexec=fctx.isexec(),
                          copysource=source)

    return getfilectx
2271 2272
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # deleted file: memctx expects None here
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data,
                          islink=islink, isexec=isexec,
                          copysource=copysource)

    return getfilectx
2286 2287
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        # missing parents are represented by the null id
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [self._repo[p1], self._repo[p2]]
        self._files = sorted(set(files))
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            # a patch store: adapt it to the filectxfn protocol
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # a plain context: wrap it in a lookup function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""
        # keep this simple for now; just worry about p1
        man = self._parents[0].manifest().copy()
        status = self._status

        for path in status.modified:
            man[path] = modifiednodeid
        for path in status.added:
            man[path] = addednodeid
        for path in status.removed:
            if path in man:
                del man[path]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                bucket = added
            elif self[f]:
                bucket = modified
            else:
                bucket = removed
            bucket.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2401 2402
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # symlink takes precedence over the exec bit
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # content-only comparison: True when data differs
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2444 2445
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # copy before padding so the caller's list is never mutated
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1 != nullid`` compares a changectx against a raw
        # node id, which is presumably always unequal; the intent looks like
        # ``p1.node() != nullid`` (skip the check for a null parent) --
        # confirm before changing, since for a null parent both manifest
        # nodes are the null id and the check passes anyway.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are untouched; delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2539 2540
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        eitherlink = 'l' in self.flags() or 'l' in fctx.flags()
        if not eitherlink and isinstance(fctx, workingfilectx) and self._repo:
            # Both sides are disk-backed, so let filecmp compare them.  Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flag information
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as fp:
            fp.write(data)
@@ -1,218 +1,241 b''
1 1
2 2 $ cat >> $HGRCPATH << EOF
3 3 > [experimental]
4 4 > copies.write-to=changeset-only
5 5 > copies.read-from=changeset-only
6 6 > [alias]
7 7 > changesetcopies = log -r . -T 'files: {files}
8 8 > {extras % "{ifcontains("files", key, "{key}: {value}\n")}"}
9 9 > {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
10 10 > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
11 11 > [extensions]
12 12 > rebase =
13 13 > split =
14 14 > EOF
15 15
16 16 Check that copies are recorded correctly
17 17
18 18 $ hg init repo
19 19 $ cd repo
20 20 $ echo a > a
21 21 $ hg add a
22 22 $ hg ci -m initial
23 23 $ hg cp a b
24 24 $ hg cp a c
25 25 $ hg cp a d
26 26 $ hg ci -m 'copy a to b, c, and d'
27 27 $ hg changesetcopies
28 28 files: b c d
29 29 filesadded: 0
30 30 1
31 31 2
32 32
33 33 p1copies: 0\x00a (esc)
34 34 1\x00a (esc)
35 35 2\x00a (esc)
36 36 $ hg showcopies
37 37 a -> b
38 38 a -> c
39 39 a -> d
40 40 $ hg showcopies --config experimental.copies.read-from=compatibility
41 41 a -> b
42 42 a -> c
43 43 a -> d
44 44 $ hg showcopies --config experimental.copies.read-from=filelog-only
45 45
46 46 Check that renames are recorded correctly
47 47
48 48 $ hg mv b b2
49 49 $ hg ci -m 'rename b to b2'
50 50 $ hg changesetcopies
51 51 files: b b2
52 52 filesadded: 1
53 53 filesremoved: 0
54 54
55 55 p1copies: 1\x00b (esc)
56 56 $ hg showcopies
57 57 b -> b2
58 58
59 59 Rename onto existing file. This should get recorded in the changeset files list and in the extras,
60 60 even though there is no filelog entry.
61 61
62 62 $ hg cp b2 c --force
63 63 $ hg st --copies
64 64 M c
65 65 b2
66 66 $ hg debugindex c
67 67 rev linkrev nodeid p1 p2
68 68 0 1 b789fdd96dc2 000000000000 000000000000
69 69 $ hg ci -m 'move b onto d'
70 70 $ hg changesetcopies
71 71 files: c
72 72
73 73 p1copies: 0\x00b2 (esc)
74 74 $ hg showcopies
75 75 b2 -> c
76 76 $ hg debugindex c
77 77 rev linkrev nodeid p1 p2
78 78 0 1 b789fdd96dc2 000000000000 000000000000
79 79
80 80 Create a merge commit with copying done during merge.
81 81
82 82 $ hg co 0
83 83 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
84 84 $ hg cp a e
85 85 $ hg cp a f
86 86 $ hg ci -m 'copy a to e and f'
87 87 created new head
88 88 $ hg merge 3
89 89 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 90 (branch merge, don't forget to commit)
91 91 File 'a' exists on both sides, so 'g' could be recorded as being from p1 or p2, but we currently
92 92 always record it as being from p1
93 93 $ hg cp a g
94 94 File 'd' exists only in p2, so 'h' should be from p2
95 95 $ hg cp d h
96 96 File 'f' exists only in p1, so 'i' should be from p1
97 97 $ hg cp f i
98 98 $ hg ci -m 'merge'
99 99 $ hg changesetcopies
100 100 files: g h i
101 101 filesadded: 0
102 102 1
103 103 2
104 104
105 105 p1copies: 0\x00a (esc)
106 106 2\x00f (esc)
107 107 p2copies: 1\x00d (esc)
108 108 $ hg showcopies
109 109 a -> g
110 110 d -> h
111 111 f -> i
112 112
113 113 Test writing to both changeset and filelog
114 114
115 115 $ hg cp a j
116 116 $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
117 117 $ hg changesetcopies
118 118 files: j
119 119 filesadded: 0
120 120 filesremoved:
121 121
122 122 p1copies: 0\x00a (esc)
123 123 p2copies:
124 124 $ hg debugdata j 0
125 125 \x01 (esc)
126 126 copy: a
127 127 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
128 128 \x01 (esc)
129 129 a
130 130 $ hg showcopies
131 131 a -> j
132 132 $ hg showcopies --config experimental.copies.read-from=compatibility
133 133 a -> j
134 134 $ hg showcopies --config experimental.copies.read-from=filelog-only
135 135 a -> j
136 136 The entries should be written to extras even if they're empty (so the client
137 137 won't have to fall back to reading from filelogs)
138 138 $ echo x >> j
139 139 $ hg ci -m 'modify j' --config experimental.copies.write-to=compatibility
140 140 $ hg changesetcopies
141 141 files: j
142 142 filesadded:
143 143 filesremoved:
144 144
145 145 p1copies:
146 146 p2copies:
147 147
148 148 Test writing only to filelog
149 149
150 150 $ hg cp a k
151 151 $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
152 152 $ hg changesetcopies
153 153 files: k
154 154
155 155 $ hg debugdata k 0
156 156 \x01 (esc)
157 157 copy: a
158 158 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
159 159 \x01 (esc)
160 160 a
161 161 $ hg showcopies
162 162 $ hg showcopies --config experimental.copies.read-from=compatibility
163 163 a -> k
164 164 $ hg showcopies --config experimental.copies.read-from=filelog-only
165 165 a -> k
166 166
167 167 $ cd ..
168 168
169 169 Test rebasing a commit with copy information
170 170
171 171 $ hg init rebase-rename
172 172 $ cd rebase-rename
173 173 $ echo a > a
174 174 $ hg ci -Aqm 'add a'
175 175 $ echo a2 > a
176 176 $ hg ci -m 'modify a'
177 177 $ hg co -q 0
178 178 $ hg mv a b
179 179 $ hg ci -qm 'rename a to b'
180 180 $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
181 181 rebasing 2:fc7287ac5b9b "rename a to b" (tip)
182 182 merging a and b to b
183 183 saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/fc7287ac5b9b-8f2a95ec-rebase.hg
184 184 $ hg st --change . --copies
185 185 A b
186 186 a
187 187 R a
188 188 $ cd ..
189 189
190 190 Test splitting a commit
191 191
192 192 $ hg init split
193 193 $ cd split
194 194 $ echo a > a
195 195 $ echo b > b
196 196 $ hg ci -Aqm 'add a and b'
197 197 $ echo a2 > a
198 198 $ hg mv b c
199 199 $ hg ci -m 'modify a, move b to c'
200 $ (hg --config ui.interactive=yes split 2>&1 | grep mercurial.error) <<EOF
200 $ hg --config ui.interactive=yes split <<EOF
201 201 > y
202 202 > y
203 203 > n
204 204 > y
205 205 > EOF
206 mercurial.error.ProgrammingError: some copy targets missing from file list
206 diff --git a/a b/a
207 1 hunks, 1 lines changed
208 examine changes to 'a'?
209 (enter ? for help) [Ynesfdaq?] y
210
211 @@ -1,1 +1,1 @@
212 -a
213 +a2
214 record this change to 'a'?
215 (enter ? for help) [Ynesfdaq?] y
216
217 diff --git a/b b/c
218 rename from b
219 rename to c
220 examine changes to 'b' and 'c'?
221 (enter ? for help) [Ynesfdaq?] n
222
223 created new head
224 diff --git a/b b/c
225 rename from b
226 rename to c
227 examine changes to 'b' and 'c'?
228 (enter ? for help) [Ynesfdaq?] y
229
230 saved backup bundle to $TESTTMP/split/.hg/strip-backup/9a396d463e04-2d9e6864-split.hg
207 231 $ cd ..
208 232
209 233 Test committing half a rename
210 234
211 235 $ hg init partial
212 236 $ cd partial
213 237 $ echo a > a
214 238 $ hg ci -Aqm 'add a'
215 239 $ hg mv a b
216 $ hg ci -m 'remove a' a 2>&1 | grep mercurial.error
217 mercurial.error.ProgrammingError: some copy targets missing from file list
240 $ hg ci -m 'remove a' a
218 241 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now