# Provenance: Mercurial context.py, changeset r42509:313812cb (default branch)
# "copies: fix duplicatecopies() with overlay context" -- Martin von Zweigbergk
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short hex of the context's node; subclasses provide node().
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal only when they are the same concrete type
        # and refer to the same revision. Objects without a _rev (or of a
        # different type entirely) are simply unequal, never an error.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' tests membership in the context's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # 'ctx[path]' returns a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context yields the paths in its manifest.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        ``other`` is the context to compare against, ``s`` carries the
        deleted/unknown/ignored results already gathered by the caller.
        Returns a scmutil.status tuple.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            # Files already known deleted take precedence over any diff entry.
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate information for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for a subrepo path in this context's substate.
        return self.substate[subpath][1]

    # Simple accessors; _rev/_node/_manifest etc. are provided by subclasses
    # or property caches.
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything past the 'public' phase may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when there is only one parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for ``path``, raising
        ManifestLookupError when the path is not in this context.

        Prefers already-cached data (_manifest, then _manifestdelta) before
        falling back to a targeted manifest lookup.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files simply have no flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    @propertycache
    def _copies(self):
        """(p1copies, p2copies) dicts mapping dst -> src, computed from
        per-file rename metadata in the filelogs."""
        p1copies = {}
        p2copies = {}
        p1 = self.p1()
        p2 = self.p2()
        narrowmatch = self._repo.narrowmatch()
        for dst in self.files():
            # Skip files outside the narrowspec or not present in this
            # context (e.g. removed files listed in files()).
            if not narrowmatch(dst) or dst not in self:
                continue
            copied = self[dst].renamed()
            if not copied:
                continue
            src, srcnode = copied
            # Attribute the copy to whichever parent actually has the
            # matching source file revision.
            if src in p1 and p1[src].filenode() == srcnode:
                p1copies[dst] = src
            elif src in p2 and p2[src].filenode() == srcnode:
                p2copies[dst] = src
        return p1copies, p2copies
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher bound to this context."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, pathfn=None, copy=None,
             copysourcematch=None, hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, pathfn=pathfn,
                          copy=copy, copysourcematch=copysourcematch,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE(review): the local name 'reversed' shadows the builtin of the
        # same name for the remainder of this method.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # Prefix subrepo results with the subrepo path and merge them
                # into the top-level status lists.
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
402 402
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        # Hash by revision when available; fall back to identity for
        # partially-constructed objects.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # A single-element list for non-merges; null second parents are
        # dropped here.
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        """Return the raw changeset fields as a tuple."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    @propertycache
    def _copies(self):
        """Copy metadata, read from the changeset itself or from the
        filelogs depending on experimental.copies.read-from."""
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if (source == 'changeset-only' or
            (source == 'compatibility' and p1copies is not None)):
            return p1copies or {}, p2copies or {}

        # Otherwise (config said to read only from filelog, or we are in
        # compatiblity mode and there is not data in the changeset), we get
        # the copy metadata from the filelogs.
        return super(changectx, self)._copies
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        # changectx always represents an on-disk revision.
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # No configured preference matched: use the revlog answer.
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
592 592
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        # Revlog holding the history of this path.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """Changelog revision this file context is associated with.

        The lookup order matters: an explicit changectx wins, then a known
        descendant revision (lazily adjusted for linkrev aliasing), and
        finally the raw filelog linkrev.
        """
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # Resolve an explicit file id if one was given, otherwise look the
        # path up in the associated changeset's manifest.
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
630 630
    def __nonzero__(self):
        # A file context is truthy iff its file node can be resolved; a
        # LookupError means the file is absent from this context.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        # "path@changeset", falling back to "path@???" when the changeset
        # cannot be resolved.
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # Hash by (path, filenode); identity fallback mirrors __eq__'s
        # AttributeError handling.
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal iff same concrete type, same path and same file node.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
667 667
    # Simple accessors. Most delegate to the associated changectx; the
    # cached properties above supply _filerev/_filenode/_flags.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # Raw revlog linkrev; may be an alias, see _adjustlinkrev().
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # (source path, source filenode) when this revision is a copy/rename,
        # otherwise a falsy value; _copied is set by subclasses.
        return self._copied
    def copysource(self):
        return self._copied and self._copied[0]
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        # Treat unreadable data as non-binary rather than propagating IOError.
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
739 739
    # Subclasses with their own comparison logic set this to True so that
    # cmp() below defers to them.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            # The other side is not revlog-backed (e.g. working directory),
            # so size-based shortcuts below decide whether a full content
            # comparison is needed. The order of these checks is deliberate.
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
768 768
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # Use the unfiltered changelog: linkrevs may point at filtered revs.
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
822 822
823 823 def isintroducedafter(self, changelogrev):
824 824 """True if a filectx has been introduced after a given floor revision
825 825 """
826 826 if self.linkrev() >= changelogrev:
827 827 return True
828 828 introrev = self._introrev(stoprev=changelogrev)
829 829 if introrev is None:
830 830 return False
831 831 return introrev >= changelogrev
832 832
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        # Dispatch on which association attributes are already present:
        # a cached _changeid wins, then an explicit _changectx, then a
        # known descendant revision; otherwise fall back to the raw linkrev.
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()
872 872
873 873 def introfilectx(self):
874 874 """Return filectx having identical contents, but pointing to the
875 875 changeset revision where this filectx was introduced"""
876 876 introrev = self.introrev()
877 877 if self.rev() == introrev:
878 878 return self
879 879 return self.filectx(self.filenode(), changeid=introrev)
880 880
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
896 896
    def parents(self):
        """Return the parent filectxs of this file revision, substituting
        rename source information for the first null parent when present."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        # Second parent, or a null filectx (fileid=-1 is the null sentinel)
        # when this file revision has only one parent.
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925 925
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)

        follow: also trace lines across renames.
        skiprevs: revisions whose changes should be attributed to their
        parents instead (passed through to dagop.annotate).
        diffopts: diff options forwarded to dagop.annotate.
        """
        # per-path filelog cache shared by all parent lookups below
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
971 971
972 972 def ancestors(self, followfirst=False):
973 973 visit = {}
974 974 c = self
975 975 if followfirst:
976 976 cut = 1
977 977 else:
978 978 cut = None
979 979
980 980 while True:
981 981 for parent in c.parents()[:cut]:
982 982 visit[(parent.linkrev(), parent.filenode())] = parent
983 983 if not visit:
984 984 break
985 985 c = visit.pop(max(visit))
986 986 yield c
987 987
988 988 def decodeddata(self):
989 989 """Returns `data()` after running repository decoding filters.
990 990
991 991 This is often equivalent to how the data would be expressed on disk.
992 992 """
993 993 return self._repo.wwritedata(self.path(), self.data())
994 994
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor is required to resolve the file revision later
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-populate the property caches that were explicitly given
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository.  When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # revision content without filelog-level transforms applied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content at this revision.

        Censored nodes either yield empty content (censor.policy=ignore)
        or abort with a hint.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as recorded by the filelog for this file revision
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        # if either parent of the changeset carries this exact file node,
        # the rename was introduced earlier and is not reported here
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1100 1100
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        """text: commit message; user/date/changes pre-seed the matching
        property caches when given; extra is copied, and the branch (from
        the ``branch`` argument or extra) defaults to 'default'."""
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        if not self._extra.get('branch'):
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # first parent's short hash with a '+' marker for mutability
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # lazily computed repository status (when not supplied to __init__)
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # honor devel.default-date for reproducible test runs
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # sorted union of files touched by this pending commit
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # an uncommitted context carries no tags
        return []

    def bookmarks(self):
        # union of the bookmarks of all parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # a pending commit is at least draft, and never more public than
        # its most-secret parent
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        # parents first, then all their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # intentionally a no-op here; subclasses override as needed

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1227 1227
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        branch = None
        # derive the branch from the dirstate unless extra already names one
        if not extra or 'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
        super(workingctx, self).__init__(repo, text, user, date, extra, changes,
                                         branch=branch)

    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked and not removed (states other than '?' and 'r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        # prefer the status-built manifest when already computed
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition; returns rejected paths.

        Warns (without rejecting) about very large files and already
        tracked files; rejects missing paths and non-file/non-symlink
        entries.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        # stop tracking files without deleting them; returns rejected paths
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    # scheduled-for-add files are simply dropped
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` is a copy of ``source``.

        ``dest`` must already exist in the working directory as a regular
        file or symlink; otherwise only a warning is emitted.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        # split dirstate copy records into per-parent maps, keeping only
        # sources present in the respective parent manifest and
        # destinations inside the narrowspec
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # dirstate entries that match, excluding removed files
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def markcommitted(self, node):
        # flip modified/added files to normal, drop removed ones, and move
        # the dirstate parent to the freshly committed node
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1664 1664
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # uncommitted: no changeset id or file revision yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file is a copy,
        else None."""
        path = self.copysource()
        if not path:
            return None
        # nullid when the copy source is not in the first parent's manifest
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent is the copy source; its filelog is unknown (None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1717 1717
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # raw working-directory content (read through repo filters)
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tzoffset) of the on-disk file, falling back to
        the working context's date when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # unconditional: recording the copy is valid regardless of the
        # file's current dirstate state (fixes overlay-context duplicatecopies)
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # set symlink (l) / executable (x) flags on the on-disk file
        self._repo.wvfs.setflags(self._path, l, x)
1802 1801
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or replace) the context this overlay caches writes against."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return file content, preferring the write-back cache."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Start from the base manifest and overlay our cached changes.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty and present both here and in the base context.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # Dirty and present here but absent from the base context.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # Marked deleted here but present in the base context.
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def p1copies(self):
        # The wrapped context is an attribute of self (set by ``setbase()``),
        # not of self._repo; the old ``self._repo._wrappedctx`` spelling
        # raised AttributeError.
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # See p1copies() for why this reads self._wrappedctx directly.
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Mark ``path`` as copied from ``origin`` in the cache."""
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Report the queried ``path``: this class has no ``_path``
                # attribute, so the old message raised AttributeError instead
                # of naming the offending file.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Report the queried ``path`` (no ``_path`` attribute here;
                # see flags()).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2140 2139
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; every operation below
        # delegates to it, keyed by this file's path.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if this file's content differs from ``fctx``'s.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        # NOTE(review): both exists() and lexists() resolve through the
        # parent's exists(), which follows symlinks -- confirm lexists
        # semantics are intended here.
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit: writes never touch the filesystem.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but
        # meaningless for an in-memory write, so it is not forwarded.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No on-disk conflicts are possible for an in-memory file.
        pass
2196 2195
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # ``changes`` is the status describing the files being committed; it
        # is forwarded to workingctx so status queries are answered from it.
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything in the manifest that this commit does not touch is
            # reported as clean.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # deleted/unknown/ignored are deliberately empty: this context only
        # describes the files selected for the commit.
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2232 2231
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # Only the path participates in the cache key; repo and memctx are
        # the same for every call through this closure.
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2248 2247
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        # Look the file up in the source context and mirror its content,
        # flags and copy metadata into an in-memory file context.
        srcfctx = ctx[path]
        return memfilectx(repo, memctx, path, srcfctx.data(),
                          islink=srcfctx.islink(),
                          isexec=srcfctx.isexec(),
                          copysource=srcfctx.copysource())

    return getfilectx
2263 2262
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # A None payload means the patch deletes this file; memctx
            # expects None in that case.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data,
                          islink=islink,
                          isexec=isexec,
                          copysource=copysource)

    return getfilectx
2278 2277
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        # Treat a missing (None) parent as the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # Accept a patchstore or an arbitrary context in place of a callable
        # and adapt them to the filectxfn interface.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not known to either parent: a brand-new file
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content change
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2393 2392
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # The link flag wins over the exec flag; only one is recorded.
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True if contents differ from ``fctx`` (same convention as other
        # file contexts' cmp()).
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # Only the in-memory content is replaced; ``flags`` is ignored here.
        self._data = data
2436 2435
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list isn't mutated.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File data is unchanged; delegate to the original revision.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not known to either parent: added
                added.append(f)
            elif f in self:
                # still present in the reused manifest: modified
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2531 2530
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so the on-disk
        # fast path is only taken when neither side is a symlink.
        eitherlink = 'l' in self.flags() or 'l' in fctx.flags()
        diskbacked = isinstance(fctx, workingfilectx) and self._repo
        if diskbacked and not eitherlink:
            # Note that filecmp uses the opposite return values (True if
            # same) from our cmp functions (True if different).
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no link/exec flag information.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # Flags are unsupported for arbitrary files.
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,801 +1,802 b''
1 1 # copies.py - copy detection for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import heapq
12 12 import os
13 13
14 14 from .i18n import _
15 15
16 16 from . import (
17 17 match as matchmod,
18 18 node,
19 19 pathutil,
20 20 util,
21 21 )
22 22 from .utils import (
23 23 stringutil,
24 24 )
25 25
def _findlimit(repo, ctxa, ctxb):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendent
    of the other, in which case we can return the minimum revnum of a and b.
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    wdirparents = None
    a = ctxa.rev()
    b = ctxb.rev()
    if a is None:
        # ctxa is the working directory: use wdirrev as its pseudo-revnum
        # and remember its real parents for the walk below
        wdirparents = (ctxa.p1(), ctxa.p2())
        a = node.wdirrev
    if b is None:
        # at most one of the two contexts may be the working directory
        assert not wdirparents
        wdirparents = (ctxb.p1(), ctxb.p2())
        b = node.wdirrev

    # side: -1 means "ancestor of a only", 1 means "ancestor of b only",
    # 0 means "ancestor of both" (no longer interesting)
    side = {a: -1, b: 1}
    # the heap is a min-heap, so negate revs to visit highest revs first
    # (i.e. walk the graph in reverse topological order)
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    limit = node.wdirrev

    while interesting:
        r = -heapq.heappop(visit)
        if r == node.wdirrev:
            # synthesize parents for the working directory pseudo-rev
            parents = [pctx.rev() for pctx in wdirparents]
        else:
            parents = cl.parentrevs(r)
        if parents[1] == node.nullrev:
            parents = parents[:1]
        for p in parents:
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @  3 temporary amend commit for a1-amend
    # |
    # o  2 a1-amend
    # |
    # | o  1 a1
    # |/
    # o  0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendent of b or visa-versa.
    return min(limit, a, b)
109 109
110 110 def _chain(src, dst, a, b):
111 111 """chain two sets of copies 'a' and 'b'"""
112 112
113 113 # When chaining copies in 'a' (from 'src' via some other commit 'mid') with
114 114 # copies in 'b' (from 'mid' to 'dst'), we can get the different cases in the
115 115 # following table (not including trivial cases). For example, case 2 is
116 116 # where a file existed in 'src' and remained under that name in 'mid' and
117 117 # then was renamed between 'mid' and 'dst'.
118 118 #
119 119 # case src mid dst result
120 120 # 1 x y - -
121 121 # 2 x y y x->y
122 122 # 3 x y x -
123 123 # 4 x y z x->z
124 124 # 5 - x y -
125 125 # 6 x x y x->y
126 126
127 127 # Initialize result ('t') from 'a'. This catches cases 1 & 2. We'll remove
128 128 # case 1 later. We'll also catch cases 3 & 4 here. Case 4 will be
129 129 # overwritten later, and case 3 will be removed later.
130 130 t = a.copy()
131 131 for k, v in b.iteritems():
132 132 if v in t:
133 133 # Found a chain, i.e. cases 3 & 4. We'll remove case 3 later.
134 134 t[k] = t[v]
135 135 else:
136 136 # Renamed only in 'b', i.e. cases 5 & 6. We'll remove case 5 later.
137 137 t[k] = v
138 138
139 139 for k, v in list(t.items()):
140 140 # remove copies from files that didn't exist, i.e. case 5
141 141 if v not in src:
142 142 del t[k]
143 143 # remove criss-crossed copies, i.e. case 3
144 144 elif k in src and v in dst:
145 145 del t[k]
146 146 # remove copies to files that were then removed, i.e. case 1
147 147 # and file 'y' in cases 3 & 4 (in case of rename)
148 148 elif k not in dst:
149 149 del t[k]
150 150
151 151 return t
152 152
153 153 def _tracefile(fctx, am, limit):
154 154 """return file context that is the ancestor of fctx present in ancestor
155 155 manifest am, stopping after the first ancestor lower than limit"""
156 156
157 157 for f in fctx.ancestors():
158 158 if am.get(f.path(), None) == f.filenode():
159 159 return f
160 160 if not f.isintroducedafter(limit):
161 161 return None
162 162
163 163 def _dirstatecopies(repo, match=None):
164 164 ds = repo.dirstate
165 165 c = ds.copies().copy()
166 166 for k in list(c):
167 167 if ds[k] not in 'anm' or (match and not match(k)):
168 168 del c[k]
169 169 return c
170 170
171 171 def _computeforwardmissing(a, b, match=None):
172 172 """Computes which files are in b but not a.
173 173 This is its own function so extensions can easily wrap this call to see what
174 174 files _forwardcopies is about to process.
175 175 """
176 176 ma = a.manifest()
177 177 mb = b.manifest()
178 178 return mb.filesnotin(ma, match=match)
179 179
def usechangesetcentricalgo(repo):
    """Checks if we should use changeset-centric copy algorithms"""
    readfrom = repo.ui.config('experimental', 'copies.read-from')
    return readfrom in ('changeset-only', 'compatibility')
184 184
def _committedforwardcopies(a, b, match):
    """Like _forwardcopies(), but b.rev() cannot be None (working copy)

    Returns a {dst@b: src@a} dict of copies, computed by tracing each file
    added between a and b back through its filelog ancestors.
    """
    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    repo = a._repo

    if usechangesetcentricalgo(repo):
        return _changesetforwardcopies(a, b, match)

    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
    dbg = repo.ui.debug
    if debug:
        dbg('debug.copies: looking into rename from %s to %s\n'
            % (a, b))
    limit = _findlimit(repo, a, b)
    if debug:
        dbg('debug.copies: search limit: %d\n' % limit)
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests, since
    # it compares the entire manifests. We can optimize it in the common use
    # case of computing what copies are in a commit versus its parent (like
    # during a rebase or histedit). Note, we exclude merge commits from this
    # optimization, since the ctx.files() for a merge commit is not correct for
    # this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = matchmod.exact(b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    # shared ancestry cache so each _tracefile() walk reuses the same
    # ancestor set instead of recomputing it per file
    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)

    if debug:
        dbg('debug.copies: missing files to search: %d\n' % len(missing))

    for f in sorted(missing):
        if debug:
            dbg('debug.copies: tracing file: %s\n' % f)
        fctx = b[f]
        fctx._ancestrycontext = ancestrycontext

        if debug:
            start = util.timer()
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            if debug:
                dbg('debug.copies: rename of: %s\n' % ofctx._path)
            cm[f] = ofctx.path()
        if debug:
            dbg('debug.copies: time: %f seconds\n'
                % (util.timer() - start))
    return cm
243 243
def _changesetforwardcopies(a, b, match):
    """Changeset-centric version of _committedforwardcopies().

    Propagates the per-changeset copy records (p1copies/p2copies) along
    every path from a to b instead of walking filelogs. Returns the
    {dst@b: src@a} copy dict.
    """
    if a.rev() == node.nullrev:
        # nothing can have been copied from the null revision
        return {}

    repo = a.repo()
    children = {}
    cl = repo.changelog
    # revisions that are ancestors of b but not of a: the paths along which
    # copy information must be propagated
    missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
    for r in missingrevs:
        for p in cl.parentrevs(r):
            if p == node.nullrev:
                continue
            if p not in children:
                children[p] = [r]
            else:
                children[p].append(r)

    roots = set(children) - set(missingrevs)
    # 'work' contains 3-tuples of a (revision number, parent number, copies).
    # The parent number is only used for knowing which parent the copies dict
    # came from.
    work = [(r, 1, {}) for r in roots]
    heapq.heapify(work)
    while work:
        r, i1, copies1 = heapq.heappop(work)
        if work and work[0][0] == r:
            # We are tracing copies from both parents
            r, i2, copies2 = heapq.heappop(work)
            copies = {}
            ctx = repo[r]
            p1man, p2man = ctx.p1().manifest(), ctx.p2().manifest()
            allcopies = set(copies1) | set(copies2)
            # TODO: perhaps this filtering should be done as long as ctx
            # is merge, whether or not we're tracing from both parent.
            for dst in allcopies:
                if not match(dst):
                    continue
                if dst not in copies2:
                    # Copied on p1 side: mark as copy from p1 side if it didn't
                    # already exist on p2 side
                    if dst not in p2man:
                        copies[dst] = copies1[dst]
                elif dst not in copies1:
                    # Copied on p2 side: mark as copy from p2 side if it didn't
                    # already exist on p1 side
                    if dst not in p1man:
                        copies[dst] = copies2[dst]
                else:
                    # Copied on both sides: mark as copy from p1 side
                    copies[dst] = copies1[dst]
        else:
            copies = copies1
        if r == b.rev():
            # reached the target changeset; 'copies' is the full mapping
            return copies
        for c in children[r]:
            childctx = repo[c]
            if r == childctx.p1().rev():
                parent = 1
                childcopies = childctx.p1copies()
            else:
                assert r == childctx.p2().rev()
                parent = 2
                childcopies = childctx.p2copies()
            if not match.always():
                childcopies = {dst: src for dst, src in childcopies.items()
                               if match(dst)}
            # extend the copies traced so far with this child's own records
            childcopies = _chain(a, childctx, copies, childcopies)
            heapq.heappush(work, (c, parent, childcopies))
    # b is always reachable from the roots, so the loop must return
    assert False
313 313
def _forwardcopies(a, b, match=None):
    """find {dst@b: src@a} copy mapping where a is an ancestor of b"""

    match = a.repo().narrowmatch(match)
    if b.rev() is not None:
        # committed revision: filelog/changeset data is authoritative
        return _committedforwardcopies(a, b, match)
    # b is the working copy
    if a == b.p1():
        # short-circuit to avoid issues with merge states
        return _dirstatecopies(b._repo, match)
    committed = _committedforwardcopies(a, b.p1(), match)
    # combine copies from dirstate if necessary
    return _chain(a, b, committed, _dirstatecopies(b._repo, match))
328 328
def _backwardrenames(a, b, match):
    """find {src@b: dst@a} rename mapping by inverting forward copies"""
    if a._repo.ui.config('experimental', 'copytrace') == 'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename situations
    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
    # arbitrarily pick one of the renames.
    # We don't want to pass in "match" here, since that would filter
    # the destination by it. Since we're reversing the copies, we want
    # to filter the source instead.
    forward = _forwardcopies(b, a)
    backward = {}
    for dst in sorted(forward):
        src = forward[dst]
        if match and not match(src):
            continue
        # a file that still exists in 'a' was copied, not renamed
        if src in a:
            continue
        backward[src] = dst
    return backward
349 349
def pathcopies(x, y, match=None):
    """find {dst@y: src@x} copy mapping for directed compare"""
    repo = x._repo
    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
    if debug:
        repo.ui.debug('debug.copies: searching copies from %s to %s\n'
                      % (x, y))
    if x == y or not x or not y:
        return {}
    base = y.ancestor(x)
    if base == x:
        # y descends from x: pure forward trace
        if debug:
            repo.ui.debug('debug.copies: search mode: forward\n')
        return _forwardcopies(x, y, match=match)
    if base == y:
        # x descends from y: pure backward trace
        if debug:
            repo.ui.debug('debug.copies: search mode: backward\n')
        return _backwardrenames(x, y, match=match)
    if debug:
        repo.ui.debug('debug.copies: search mode: combined\n')
    # unrelated heads: go back to the common ancestor, then forward to y
    return _chain(x, y, _backwardrenames(x, base, match=match),
                  _forwardcopies(base, y, match=match))
372 372
def mergecopies(repo, c1, c2, base):
    """
    Find moves and copies between contexts c1 and c2 that are relevant for
    merging, using 'base' as the merge base.

    Copytracing is used by commands like rebase, merge, and unshelve to
    merge files that were moved/copied in one merge parent and modified in
    another. For example, rebasing a commit that modifies a.txt onto a
    commit that renamed a.txt to b.txt would, with copytracing disabled,
    report:

    ```other changed <file> which local deleted```

    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete"
    and "dirmove".

    "copy" maps destination name -> source name, where source is in c1 and
    destination is in c2 or vice-versa.

    "movewithdir" maps source name -> destination name, where the file at
    source, present in one context but not the other, needs to be moved to
    destination by the merge process because the other context moved the
    directory it is in.

    "diverge" maps source name -> list of destination names for divergent
    renames.

    "renamedelete" maps source name -> list of destination names for files
    deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" maps detected source dir -> destination dir renames. This is
    needed for handling changes to new files previously grafted into
    renamed directories.

    Which copytracing algorithm runs depends on configuration.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}, {}

    narrowmatch = c1.repo().narrowmatch()

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}

    tracemode = repo.ui.config('experimental', 'copytrace')
    if stringutil.parsebool(tracemode) is False:
        # stringutil.parsebool() returns None when it is unable to parse the
        # value, so we should rely on making sure copytracing is on such cases
        return {}, {}, {}, {}, {}

    if usechangesetcentricalgo(repo):
        # The heuristics don't make sense when we need changeset-centric algos
        return _fullcopytracing(repo, c1, c2, base)

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept across a
    # rebase.
    if tracemode != 'heuristics':
        return _fullcopytracing(repo, c1, c2, base)
    # Do full copytracing if only non-public revisions are involved as
    # that will be fast enough and will also cover the copies which could
    # be missed by heuristics
    if _isfullcopytraceable(repo, c1, base):
        return _fullcopytracing(repo, c1, c2, base)
    return _heuristicscopytracing(repo, c1, c2, base)
451 451
452 452 def _isfullcopytraceable(repo, c1, base):
453 453 """ Checks that if base, source and destination are all no-public branches,
454 454 if yes let's use the full copytrace algorithm for increased capabilities
455 455 since it will be fast enough.
456 456
457 457 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
458 458 number of changesets from c1 to base such that if number of changesets are
459 459 more than the limit, full copytracing algorithm won't be used.
460 460 """
461 461 if c1.rev() is None:
462 462 c1 = c1.p1()
463 463 if c1.mutable() and base.mutable():
464 464 sourcecommitlimit = repo.ui.configint('experimental',
465 465 'copytrace.sourcecommitlimit')
466 466 commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
467 467 return commits < sourcecommitlimit
468 468 return False
469 469
def _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
                           copy, renamedelete):
    """Handle a file copied/renamed on only one merge side.

    m1/dsts1 describe the side that did the copy; m2/c2 the other side.
    Updates 'copy' and 'renamedelete' in place.
    """
    if src not in m2:
        # deleted on side 2
        if src not in m1:
            # renamed on side 1, deleted on side 2
            renamedelete[src] = dsts1
        return
    if m2[src] == mb[src]:
        # unmodified on side 2: nothing to merge into the copies
        return
    if not _related(c2[src], base[src]):
        # the side-2 file is unrelated to the base file: not a real conflict
        return
    # modified on side 2
    for dst in dsts1:
        if dst not in m2:
            # dst not added on side 2 (handle as regular
            # "both created" case in manifestmerge otherwise)
            copy[dst] = src
486 486
def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
    the copies.

    Returns the same five dicts as mergecopies():
    (copy, movewithdir, diverge, renamedelete, dirmove).
    """
    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    # {dst: src} copies on each side, relative to the merge base
    copies1 = pathcopies(base, c1)
    copies2 = pathcopies(base, c2)

    # invert to {src: [dst, ...]} so both sides can be compared per source
    inversecopies1 = {}
    inversecopies2 = {}
    for dst, src in copies1.items():
        inversecopies1.setdefault(src, []).append(dst)
    for dst, src in copies2.items():
        inversecopies2.setdefault(src, []).append(dst)

    copy = {}
    diverge = {}
    renamedelete = {}
    allsources = set(inversecopies1) | set(inversecopies2)
    for src in allsources:
        dsts1 = inversecopies1.get(src)
        dsts2 = inversecopies2.get(src)
        if dsts1 and dsts2:
            # copied/renamed on both sides
            if src not in m1 and src not in m2:
                # renamed on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                # If there's some overlap in the rename destinations, we
                # consider it not divergent. For example, if side 1 copies 'a'
                # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
                # and 'd' and deletes 'a'.
                if dsts1 & dsts2:
                    for dst in (dsts1 & dsts2):
                        copy[dst] = src
                else:
                    diverge[src] = sorted(dsts1 | dsts2)
            elif src in m1 and src in m2:
                # copied on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                for dst in (dsts1 & dsts2):
                    copy[dst] = src
            # TODO: Handle cases where it was renamed on one side and copied
            # on the other side
        elif dsts1:
            # copied/renamed only on side 1
            _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
                                   copy, renamedelete)
        elif dsts2:
            # copied/renamed only on side 2
            _checksinglesidecopies(src, dsts2, m2, m1, mb, c1, base,
                                   copy, renamedelete)

    # flattened destination sets, only used for the debug output below
    renamedeleteset = set()
    divergeset = set()
    for dsts in diverge.values():
        divergeset.update(dsts)
    for dsts in renamedelete.values():
        renamedeleteset.update(dsts)

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
    u1 = sorted(addedinm1 - addedinm2)
    u2 = sorted(addedinm2 - addedinm1)

    header = " unmatched files in %s"
    if u1:
        repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
    if u2:
        repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))

    fullcopy = copies1.copy()
    fullcopy.update(copies2)
    if not fullcopy:
        return copy, {}, diverge, renamedelete, {}

    if repo.ui.debugflag:
        repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in divergeset:
                note += "!"
            if f in renamedeleteset:
                note += "%"
            repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                            note))
    del divergeset

    repo.ui.debug(" checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    # Hack for adding '', which is not otherwise added, to d1 and d2
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc] = ddst

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, {}, diverge, renamedelete, {}

    # normalize to trailing-slash form so startswith() below matches whole
    # path components only
    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}

    for d in dirmove:
        repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in u1 + u2:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug((" pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete, dirmove
646 646
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If merge is involved it fallbacks to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    In some cases the copy/move candidates found by heuristics can be very large
    in number and that will make the algorithm slow. The number of possible
    candidates to check can be limited by using the config
    `experimental.copytrace.movecandidateslimit` which defaults to 100.

    Returns the same five-tuple as mergecopies(); only the first dict
    (copy) is ever populated by the heuristics themselves.
    """

    if c1.rev() is None:
        # use the committed parent of the working directory
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs('%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug("switching to full copytracing as base is not "
                      "an ancestor of c2\n")
        return _fullcopytracing(repo, c1, c2, base)

    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug("switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    # carry over explicitly recorded copies whose source still exists in c1
    cp = _forwardcopies(base, c2)
    for dst, src in cp.iteritems():
        if src in m1:
            copies[dst] = src

    # file is missing if it isn't present in the destination, but is present in
    # the base and present in the source.
    # Presence in the base is important to exclude added files, presence in the
    # source is important to exclude removed files.
    filt = lambda f: f not in m1 and f in base and f in c2
    missingfiles = [f for f in changedfiles if filt(f)]

    if missingfiles:
        # index files added on the c1 side by basename and by directory,
        # since those are the two rename shapes the heuristics recognize
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            # we can have a lot of candidates which can slow down the heuristics
            # config value to limit the number of candidates moves to check
            maxcandidates = repo.ui.configint('experimental',
                                              'copytrace.movecandidateslimit')

            if len(movecandidates) > maxcandidates:
                repo.ui.status(_("skipping copytracing for '%s', more "
                                 "candidates than the limit: %d\n")
                               % (f, len(movecandidates)))
                continue

            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return copies, {}, {}, {}, {}
747 747
748 748 def _related(f1, f2):
749 749 """return True if f1 and f2 filectx have a common ancestor
750 750
751 751 Walk back to common ancestor to see if the two files originate
752 752 from the same file. Since workingfilectx's rev() is None it messes
753 753 up the integer comparison logic, hence the pre-step check for
754 754 None (f1 and f2 can only be workingfilectx's initially).
755 755 """
756 756
757 757 if f1 == f2:
758 758 return True # a match
759 759
760 760 g1, g2 = f1.ancestors(), f2.ancestors()
761 761 try:
762 762 f1r, f2r = f1.linkrev(), f2.linkrev()
763 763
764 764 if f1r is None:
765 765 f1 = next(g1)
766 766 if f2r is None:
767 767 f2 = next(g2)
768 768
769 769 while True:
770 770 f1r, f2r = f1.linkrev(), f2.linkrev()
771 771 if f1r > f2r:
772 772 f1 = next(g1)
773 773 elif f2r > f1r:
774 774 f2 = next(g2)
775 775 else: # f1 and f2 point to files in the same linkrev
776 776 return f1 == f2 # true if they point to the same file
777 777 except StopIteration:
778 778 return False
779 779
def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
    """reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    """
    cfg = repo.ui.config('experimental', 'copytrace')
    parsed = stringutil.parsebool(cfg)
    # treat 'heuristics', any truthy value, and unparseable values as "on";
    # only an explicit false setting disables tracing
    tracing = cfg == 'heuristics' or parsed or parsed is None
    exclude = {}
    if skiprev is not None and tracing:
        # copytrace='off' skips this line, but not the entire function
        # because the line below is O(size of the repo) during a rebase,
        # while the rest of the function is much faster (and is required
        # for carrying copy metadata across the rebase anyway).
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    copies = pathcopies(repo[fromrev], repo[rev])
    for dst in copies:
        if dst in exclude:
            continue
        # an overlay context (e.g. in-memory rebase) may not contain dst
        if dst in wctx:
            wctx[dst].markcopied(copies[dst])
@@ -1,845 +1,843 b''
1 1 #require symlink execbit
2 2 $ cat << EOF >> $HGRCPATH
3 3 > [phases]
4 4 > publish=False
5 5 > [extensions]
6 6 > amend=
7 7 > rebase=
8 8 > debugdrawdag=$TESTDIR/drawdag.py
9 9 > strip=
10 10 > [rebase]
11 11 > experimental.inmemory=1
12 12 > [diff]
13 13 > git=1
14 14 > [alias]
15 15 > tglog = log -G --template "{rev}: {node|short} '{desc}'\n"
16 16 > EOF
17 17
18 18 Rebase a simple DAG:
19 19 $ hg init repo1
20 20 $ cd repo1
21 21 $ hg debugdrawdag <<'EOS'
22 22 > c b
23 23 > |/
24 24 > d
25 25 > |
26 26 > a
27 27 > EOS
28 28 $ hg up -C a
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30 $ hg tglog
31 31 o 3: 814f6bd05178 'c'
32 32 |
33 33 | o 2: db0e82a16a62 'b'
34 34 |/
35 35 o 1: 02952614a83d 'd'
36 36 |
37 37 @ 0: b173517d0057 'a'
38 38
39 39 $ hg cat -r 3 c
40 40 c (no-eol)
41 41 $ hg cat -r 2 b
42 42 b (no-eol)
43 43 $ hg rebase --debug -r b -d c | grep rebasing
44 44 rebasing in-memory
45 45 rebasing 2:db0e82a16a62 "b" (b)
46 46 $ hg tglog
47 47 o 3: ca58782ad1e4 'b'
48 48 |
49 49 o 2: 814f6bd05178 'c'
50 50 |
51 51 o 1: 02952614a83d 'd'
52 52 |
53 53 @ 0: b173517d0057 'a'
54 54
55 55 $ hg cat -r 3 b
56 56 b (no-eol)
57 57 $ hg cat -r 2 c
58 58 c (no-eol)
59 59 $ cd ..
60 60
61 61 Case 2:
62 62 $ hg init repo2
63 63 $ cd repo2
64 64 $ hg debugdrawdag <<'EOS'
65 65 > c b
66 66 > |/
67 67 > d
68 68 > |
69 69 > a
70 70 > EOS
71 71
72 72 Add a symlink and executable file:
73 73 $ hg up -C c
74 74 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 75 $ ln -s somefile e
76 76 $ echo f > f
77 77 $ chmod +x f
78 78 $ hg add e f
79 79 $ hg amend -q
80 80 $ hg up -Cq a
81 81
82 82 Write files to the working copy, and ensure they're still there after the rebase
83 83 $ echo "abc" > a
84 84 $ ln -s def b
85 85 $ echo "ghi" > c
86 86 $ echo "jkl" > d
87 87 $ echo "mno" > e
88 88 $ hg tglog
89 89 o 3: f56b71190a8f 'c'
90 90 |
91 91 | o 2: db0e82a16a62 'b'
92 92 |/
93 93 o 1: 02952614a83d 'd'
94 94 |
95 95 @ 0: b173517d0057 'a'
96 96
97 97 $ hg cat -r 3 c
98 98 c (no-eol)
99 99 $ hg cat -r 2 b
100 100 b (no-eol)
101 101 $ hg cat -r 3 e
102 102 somefile (no-eol)
103 103 $ hg rebase --debug -s b -d a | grep rebasing
104 104 rebasing in-memory
105 105 rebasing 2:db0e82a16a62 "b" (b)
106 106 $ hg tglog
107 107 o 3: fc055c3b4d33 'b'
108 108 |
109 109 | o 2: f56b71190a8f 'c'
110 110 | |
111 111 | o 1: 02952614a83d 'd'
112 112 |/
113 113 @ 0: b173517d0057 'a'
114 114
115 115 $ hg cat -r 2 c
116 116 c (no-eol)
117 117 $ hg cat -r 3 b
118 118 b (no-eol)
119 119 $ hg rebase --debug -s 1 -d 3 | grep rebasing
120 120 rebasing in-memory
121 121 rebasing 1:02952614a83d "d" (d)
122 122 rebasing 2:f56b71190a8f "c"
123 123 $ hg tglog
124 124 o 3: 753feb6fd12a 'c'
125 125 |
126 126 o 2: 09c044d2cb43 'd'
127 127 |
128 128 o 1: fc055c3b4d33 'b'
129 129 |
130 130 @ 0: b173517d0057 'a'
131 131
132 132 Ensure working copy files are still there:
133 133 $ cat a
134 134 abc
135 135 $ readlink.py b
136 136 b -> def
137 137 $ cat e
138 138 mno
139 139
140 140 Ensure symlink and executable files were rebased properly:
141 141 $ hg up -Cq 3
142 142 $ readlink.py e
143 143 e -> somefile
144 144 $ ls -l f | cut -c -10
145 145 -rwxr-xr-x
146 146
147 147 Rebase the working copy parent
148 148 $ hg up -C 3
149 149 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 150 $ hg rebase -r 3 -d 0 --debug | grep rebasing
151 151 rebasing in-memory
152 152 rebasing 3:753feb6fd12a "c" (tip)
153 153 $ hg tglog
154 154 @ 3: 844a7de3e617 'c'
155 155 |
156 156 | o 2: 09c044d2cb43 'd'
157 157 | |
158 158 | o 1: fc055c3b4d33 'b'
159 159 |/
160 160 o 0: b173517d0057 'a'
161 161
162 162
163 163 Test reporting of path conflicts
164 164
165 165 $ hg rm a
166 166 $ mkdir a
167 167 $ touch a/a
168 168 $ hg ci -Am "a/a"
169 169 adding a/a
170 170 $ hg tglog
171 171 @ 4: daf7dfc139cb 'a/a'
172 172 |
173 173 o 3: 844a7de3e617 'c'
174 174 |
175 175 | o 2: 09c044d2cb43 'd'
176 176 | |
177 177 | o 1: fc055c3b4d33 'b'
178 178 |/
179 179 o 0: b173517d0057 'a'
180 180
181 181 $ hg rebase -r . -d 2
182 182 rebasing 4:daf7dfc139cb "a/a" (tip)
183 183 saved backup bundle to $TESTTMP/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg
184 184
185 185 $ hg tglog
186 186 @ 4: c6ad37a4f250 'a/a'
187 187 |
188 188 | o 3: 844a7de3e617 'c'
189 189 | |
190 190 o | 2: 09c044d2cb43 'd'
191 191 | |
192 192 o | 1: fc055c3b4d33 'b'
193 193 |/
194 194 o 0: b173517d0057 'a'
195 195
196 196 $ echo foo > foo
197 197 $ hg ci -Aqm "added foo"
198 198 $ hg up '.^'
199 199 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
200 200 $ echo bar > bar
201 201 $ hg ci -Aqm "added bar"
202 202 $ hg rm a/a
203 203 $ echo a > a
204 204 $ hg ci -Aqm "added a back!"
205 205 $ hg tglog
206 206 @ 7: 855e9797387e 'added a back!'
207 207 |
208 208 o 6: d14530e5e3e6 'added bar'
209 209 |
210 210 | o 5: 9b94b9373deb 'added foo'
211 211 |/
212 212 o 4: c6ad37a4f250 'a/a'
213 213 |
214 214 | o 3: 844a7de3e617 'c'
215 215 | |
216 216 o | 2: 09c044d2cb43 'd'
217 217 | |
218 218 o | 1: fc055c3b4d33 'b'
219 219 |/
220 220 o 0: b173517d0057 'a'
221 221
222 222 $ hg rebase -r . -d 5
223 223 rebasing 7:855e9797387e "added a back!" (tip)
224 224 saved backup bundle to $TESTTMP/repo2/.hg/strip-backup/855e9797387e-81ee4c5d-rebase.hg
225 225
226 226 $ hg tglog
227 227 @ 7: bb3f02be2688 'added a back!'
228 228 |
229 229 | o 6: d14530e5e3e6 'added bar'
230 230 | |
231 231 o | 5: 9b94b9373deb 'added foo'
232 232 |/
233 233 o 4: c6ad37a4f250 'a/a'
234 234 |
235 235 | o 3: 844a7de3e617 'c'
236 236 | |
237 237 o | 2: 09c044d2cb43 'd'
238 238 | |
239 239 o | 1: fc055c3b4d33 'b'
240 240 |/
241 241 o 0: b173517d0057 'a'
242 242
243 243 $ mkdir -p c/subdir
244 244 $ echo c > c/subdir/file.txt
245 245 $ hg add c/subdir/file.txt
246 246 $ hg ci -m 'c/subdir/file.txt'
247 247 $ hg rebase -r . -d 3 -n
248 248 starting dry-run rebase; repository will not be changed
249 249 rebasing 8:e147e6e3c490 "c/subdir/file.txt" (tip)
250 250 abort: error: 'c/subdir/file.txt' conflicts with file 'c' in 3.
251 251 [255]
252 252 $ hg rebase -r 3 -d . -n
253 253 starting dry-run rebase; repository will not be changed
254 254 rebasing 3:844a7de3e617 "c"
255 255 abort: error: file 'c' cannot be written because 'c/' is a directory in e147e6e3c490 (containing 1 entries: c/subdir/file.txt)
256 256 [255]
257 257
258 258 $ cd ..
259 259
260 260 Test path auditing (issue5818)
261 261
262 262 $ mkdir lib_
263 263 $ ln -s lib_ lib
264 264 $ hg init repo
265 265 $ cd repo
266 266 $ mkdir -p ".$TESTTMP/lib"
267 267 $ touch ".$TESTTMP/lib/a"
268 268 $ hg add ".$TESTTMP/lib/a"
269 269 $ hg ci -m 'a'
270 270
271 271 $ touch ".$TESTTMP/lib/b"
272 272 $ hg add ".$TESTTMP/lib/b"
273 273 $ hg ci -m 'b'
274 274
275 275 $ hg up -q '.^'
276 276 $ touch ".$TESTTMP/lib/c"
277 277 $ hg add ".$TESTTMP/lib/c"
278 278 $ hg ci -m 'c'
279 279 created new head
280 280 $ hg rebase -s 1 -d .
281 281 rebasing 1:* "b" (glob)
282 282 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-rebase.hg (glob)
283 283 $ cd ..
284 284
285 285 Test dry-run rebasing
286 286
287 287 $ hg init repo3
288 288 $ cd repo3
289 289 $ echo a>a
290 290 $ hg ci -Aqma
291 291 $ echo b>b
292 292 $ hg ci -Aqmb
293 293 $ echo c>c
294 294 $ hg ci -Aqmc
295 295 $ echo d>d
296 296 $ hg ci -Aqmd
297 297 $ echo e>e
298 298 $ hg ci -Aqme
299 299
300 300 $ hg up 1 -q
301 301 $ echo f>f
302 302 $ hg ci -Amf
303 303 adding f
304 304 created new head
305 305 $ echo g>g
306 306 $ hg ci -Aqmg
307 307 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
308 308 @ 6:baf10c5166d4 test
309 309 | g
310 310 |
311 311 o 5:6343ca3eff20 test
312 312 | f
313 313 |
314 314 | o 4:e860deea161a test
315 315 | | e
316 316 | |
317 317 | o 3:055a42cdd887 test
318 318 | | d
319 319 | |
320 320 | o 2:177f92b77385 test
321 321 |/ c
322 322 |
323 323 o 1:d2ae7f538514 test
324 324 | b
325 325 |
326 326 o 0:cb9a9f314b8b test
327 327 a
328 328
329 329 Make sure it throws error while passing --continue or --abort with --dry-run
330 330 $ hg rebase -s 2 -d 6 -n --continue
331 331 abort: cannot specify both --dry-run and --continue
332 332 [255]
333 333 $ hg rebase -s 2 -d 6 -n --abort
334 334 abort: cannot specify both --dry-run and --abort
335 335 [255]
336 336
337 337 Check dryrun gives correct results when there is no conflict in rebasing
338 338 $ hg rebase -s 2 -d 6 -n
339 339 starting dry-run rebase; repository will not be changed
340 340 rebasing 2:177f92b77385 "c"
341 341 rebasing 3:055a42cdd887 "d"
342 342 rebasing 4:e860deea161a "e"
343 343 dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
344 344
345 345 $ hg diff
346 346 $ hg status
347 347
348 348 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
349 349 @ 6:baf10c5166d4 test
350 350 | g
351 351 |
352 352 o 5:6343ca3eff20 test
353 353 | f
354 354 |
355 355 | o 4:e860deea161a test
356 356 | | e
357 357 | |
358 358 | o 3:055a42cdd887 test
359 359 | | d
360 360 | |
361 361 | o 2:177f92b77385 test
362 362 |/ c
363 363 |
364 364 o 1:d2ae7f538514 test
365 365 | b
366 366 |
367 367 o 0:cb9a9f314b8b test
368 368 a
369 369
370 370 Check dryrun working with --collapse when there is no conflict
371 371 $ hg rebase -s 2 -d 6 -n --collapse
372 372 starting dry-run rebase; repository will not be changed
373 373 rebasing 2:177f92b77385 "c"
374 374 rebasing 3:055a42cdd887 "d"
375 375 rebasing 4:e860deea161a "e"
376 376 dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
377 377
378 378 Check dryrun gives correct results when there is conflict in rebasing
379 379 Make a conflict:
380 380 $ hg up 6 -q
381 381 $ echo conflict>e
382 382 $ hg ci -Aqm "conflict with e"
383 383 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
384 384 @ 7:d2c195b28050 test
385 385 | conflict with e
386 386 |
387 387 o 6:baf10c5166d4 test
388 388 | g
389 389 |
390 390 o 5:6343ca3eff20 test
391 391 | f
392 392 |
393 393 | o 4:e860deea161a test
394 394 | | e
395 395 | |
396 396 | o 3:055a42cdd887 test
397 397 | | d
398 398 | |
399 399 | o 2:177f92b77385 test
400 400 |/ c
401 401 |
402 402 o 1:d2ae7f538514 test
403 403 | b
404 404 |
405 405 o 0:cb9a9f314b8b test
406 406 a
407 407
408 408 $ hg rebase -s 2 -d 7 -n
409 409 starting dry-run rebase; repository will not be changed
410 410 rebasing 2:177f92b77385 "c"
411 411 rebasing 3:055a42cdd887 "d"
412 412 rebasing 4:e860deea161a "e"
413 413 merging e
414 414 transaction abort!
415 415 rollback completed
416 416 hit a merge conflict
417 417 [1]
418 418 $ hg diff
419 419 $ hg status
420 420 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
421 421 @ 7:d2c195b28050 test
422 422 | conflict with e
423 423 |
424 424 o 6:baf10c5166d4 test
425 425 | g
426 426 |
427 427 o 5:6343ca3eff20 test
428 428 | f
429 429 |
430 430 | o 4:e860deea161a test
431 431 | | e
432 432 | |
433 433 | o 3:055a42cdd887 test
434 434 | | d
435 435 | |
436 436 | o 2:177f92b77385 test
437 437 |/ c
438 438 |
439 439 o 1:d2ae7f538514 test
440 440 | b
441 441 |
442 442 o 0:cb9a9f314b8b test
443 443 a
444 444
445 445 Check dryrun working with --collapse when there is conflicts
446 446 $ hg rebase -s 2 -d 7 -n --collapse
447 447 starting dry-run rebase; repository will not be changed
448 448 rebasing 2:177f92b77385 "c"
449 449 rebasing 3:055a42cdd887 "d"
450 450 rebasing 4:e860deea161a "e"
451 451 merging e
452 452 hit a merge conflict
453 453 [1]
454 454
455 455 In-memory rebase that fails due to merge conflicts
456 456
457 457 $ hg rebase -s 2 -d 7
458 458 rebasing 2:177f92b77385 "c"
459 459 rebasing 3:055a42cdd887 "d"
460 460 rebasing 4:e860deea161a "e"
461 461 merging e
462 462 transaction abort!
463 463 rollback completed
464 464 hit merge conflicts; re-running rebase without in-memory merge
465 465 rebasing 2:177f92b77385 "c"
466 466 rebasing 3:055a42cdd887 "d"
467 467 rebasing 4:e860deea161a "e"
468 468 merging e
469 469 warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
470 470 unresolved conflicts (see hg resolve, then hg rebase --continue)
471 471 [1]
472 472 $ hg rebase --abort
473 473 saved backup bundle to $TESTTMP/repo3/.hg/strip-backup/c1e524d4287c-f91f82e1-backup.hg
474 474 rebase aborted
475 475
476 476 Retrying without in-memory merge won't lose working copy changes
477 477 $ cd ..
478 478 $ hg clone repo3 repo3-dirty -q
479 479 $ cd repo3-dirty
480 480 $ echo dirty > a
481 481 $ hg rebase -s 2 -d 7
482 482 rebasing 2:177f92b77385 "c"
483 483 rebasing 3:055a42cdd887 "d"
484 484 rebasing 4:e860deea161a "e"
485 485 merging e
486 486 transaction abort!
487 487 rollback completed
488 488 hit merge conflicts; re-running rebase without in-memory merge
489 489 abort: uncommitted changes
490 490 [255]
491 491 $ cat a
492 492 dirty
493 493
494 494 Retrying without in-memory merge won't lose merge state
495 495 $ cd ..
496 496 $ hg clone repo3 repo3-merge-state -q
497 497 $ cd repo3-merge-state
498 498 $ hg merge 4
499 499 merging e
500 500 warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
501 501 2 files updated, 0 files merged, 0 files removed, 1 files unresolved
502 502 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
503 503 [1]
504 504 $ hg resolve -l
505 505 U e
506 506 $ hg rebase -s 2 -d 7
507 507 rebasing 2:177f92b77385 "c"
508 508 abort: outstanding merge conflicts
509 509 [255]
510 510 $ hg resolve -l
511 511 U e
512 512
513 513 ==========================
514 514 Test for --confirm option|
515 515 ==========================
516 516 $ cd ..
517 517 $ hg clone repo3 repo4 -q
518 518 $ cd repo4
519 519 $ hg strip 7 -q
520 520 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
521 521 @ 6:baf10c5166d4 test
522 522 | g
523 523 |
524 524 o 5:6343ca3eff20 test
525 525 | f
526 526 |
527 527 | o 4:e860deea161a test
528 528 | | e
529 529 | |
530 530 | o 3:055a42cdd887 test
531 531 | | d
532 532 | |
533 533 | o 2:177f92b77385 test
534 534 |/ c
535 535 |
536 536 o 1:d2ae7f538514 test
537 537 | b
538 538 |
539 539 o 0:cb9a9f314b8b test
540 540 a
541 541
542 542 Check it gives error when both --dryrun and --confirm is used:
543 543 $ hg rebase -s 2 -d . --confirm --dry-run
544 544 abort: cannot specify both --confirm and --dry-run
545 545 [255]
546 546 $ hg rebase -s 2 -d . --confirm --abort
547 547 abort: cannot specify both --confirm and --abort
548 548 [255]
549 549 $ hg rebase -s 2 -d . --confirm --continue
550 550 abort: cannot specify both --confirm and --continue
551 551 [255]
552 552
553 553 Test --confirm option when there are no conflicts:
554 554 $ hg rebase -s 2 -d . --keep --config ui.interactive=True --confirm << EOF
555 555 > n
556 556 > EOF
557 557 starting in-memory rebase
558 558 rebasing 2:177f92b77385 "c"
559 559 rebasing 3:055a42cdd887 "d"
560 560 rebasing 4:e860deea161a "e"
561 561 rebase completed successfully
562 562 apply changes (yn)? n
563 563 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
564 564 @ 6:baf10c5166d4 test
565 565 | g
566 566 |
567 567 o 5:6343ca3eff20 test
568 568 | f
569 569 |
570 570 | o 4:e860deea161a test
571 571 | | e
572 572 | |
573 573 | o 3:055a42cdd887 test
574 574 | | d
575 575 | |
576 576 | o 2:177f92b77385 test
577 577 |/ c
578 578 |
579 579 o 1:d2ae7f538514 test
580 580 | b
581 581 |
582 582 o 0:cb9a9f314b8b test
583 583 a
584 584
585 585 $ hg rebase -s 2 -d . --keep --config ui.interactive=True --confirm << EOF
586 586 > y
587 587 > EOF
588 588 starting in-memory rebase
589 589 rebasing 2:177f92b77385 "c"
590 590 rebasing 3:055a42cdd887 "d"
591 591 rebasing 4:e860deea161a "e"
592 592 rebase completed successfully
593 593 apply changes (yn)? y
594 594 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
595 595 o 9:9fd28f55f6dc test
596 596 | e
597 597 |
598 598 o 8:12cbf031f469 test
599 599 | d
600 600 |
601 601 o 7:c83b1da5b1ae test
602 602 | c
603 603 |
604 604 @ 6:baf10c5166d4 test
605 605 | g
606 606 |
607 607 o 5:6343ca3eff20 test
608 608 | f
609 609 |
610 610 | o 4:e860deea161a test
611 611 | | e
612 612 | |
613 613 | o 3:055a42cdd887 test
614 614 | | d
615 615 | |
616 616 | o 2:177f92b77385 test
617 617 |/ c
618 618 |
619 619 o 1:d2ae7f538514 test
620 620 | b
621 621 |
622 622 o 0:cb9a9f314b8b test
623 623 a
624 624
625 625 Test --confirm option when there is a conflict
626 626 $ hg up tip -q
627 627 $ echo ee>e
628 628 $ hg ci --amend -m "conflict with e" -q
629 629 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
630 630 @ 9:906d72f66a59 test
631 631 | conflict with e
632 632 |
633 633 o 8:12cbf031f469 test
634 634 | d
635 635 |
636 636 o 7:c83b1da5b1ae test
637 637 | c
638 638 |
639 639 o 6:baf10c5166d4 test
640 640 | g
641 641 |
642 642 o 5:6343ca3eff20 test
643 643 | f
644 644 |
645 645 | o 4:e860deea161a test
646 646 | | e
647 647 | |
648 648 | o 3:055a42cdd887 test
649 649 | | d
650 650 | |
651 651 | o 2:177f92b77385 test
652 652 |/ c
653 653 |
654 654 o 1:d2ae7f538514 test
655 655 | b
656 656 |
657 657 o 0:cb9a9f314b8b test
658 658 a
659 659
660 660 $ hg rebase -s 4 -d . --keep --confirm
661 661 starting in-memory rebase
662 662 rebasing 4:e860deea161a "e"
663 663 merging e
664 664 hit a merge conflict
665 665 [1]
666 666 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
667 667 @ 9:906d72f66a59 test
668 668 | conflict with e
669 669 |
670 670 o 8:12cbf031f469 test
671 671 | d
672 672 |
673 673 o 7:c83b1da5b1ae test
674 674 | c
675 675 |
676 676 o 6:baf10c5166d4 test
677 677 | g
678 678 |
679 679 o 5:6343ca3eff20 test
680 680 | f
681 681 |
682 682 | o 4:e860deea161a test
683 683 | | e
684 684 | |
685 685 | o 3:055a42cdd887 test
686 686 | | d
687 687 | |
688 688 | o 2:177f92b77385 test
689 689 |/ c
690 690 |
691 691 o 1:d2ae7f538514 test
692 692 | b
693 693 |
694 694 o 0:cb9a9f314b8b test
695 695 a
696 696
697 697 Test a metadata-only in-memory merge
698 698 $ cd $TESTTMP
699 699 $ hg init no_exception
700 700 $ cd no_exception
701 701 # Produce the following graph:
702 702 # o 'add +x to foo.txt'
703 703 # | o r1 (adds bar.txt, just for something to rebase to)
704 704 # |/
705 705 # o r0 (adds foo.txt, no +x)
706 706 $ echo hi > foo.txt
707 707 $ hg ci -qAm r0
708 708 $ echo hi > bar.txt
709 709 $ hg ci -qAm r1
710 710 $ hg co -qr ".^"
711 711 $ chmod +x foo.txt
712 712 $ hg ci -qAm 'add +x to foo.txt'
713 713 issue5960: this was raising an AttributeError exception
714 714 $ hg rebase -r . -d 1
715 715 rebasing 2:539b93e77479 "add +x to foo.txt" (tip)
716 716 saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob)
717 717 $ hg diff -c tip
718 718 diff --git a/foo.txt b/foo.txt
719 719 old mode 100644
720 720 new mode 100755
721 721
722 722 Test rebasing a commit with copy information, but no content changes
723 723
724 724 $ cd ..
725 725 $ hg clone -q repo1 merge-and-rename
726 726 $ cd merge-and-rename
727 727 $ cat << EOF >> .hg/hgrc
728 728 > [experimental]
729 729 > evolution.createmarkers=True
730 730 > evolution.allowunstable=True
731 731 > EOF
732 732 $ hg co -q 1
733 733 $ hg mv d e
734 734 $ hg ci -qm 'rename d to e'
735 735 $ hg co -q 3
736 736 $ hg merge -q 4
737 737 $ hg ci -m 'merge'
738 738 $ hg co -q 2
739 739 $ mv d e
740 740 $ hg addremove -qs 0
741 741 $ hg ci -qm 'untracked rename of d to e'
742 742 $ hg debugobsolete -q `hg log -T '{node}' -r 4` `hg log -T '{node}' -r .`
743 743 1 new orphan changesets
744 744 $ hg tglog
745 745 @ 6: 676538af172d 'untracked rename of d to e'
746 746 |
747 747 | * 5: 71cb43376053 'merge'
748 748 | |\
749 749 | | x 4: 2c8b5dad7956 'rename d to e'
750 750 | | |
751 751 | o | 3: ca58782ad1e4 'b'
752 752 |/ /
753 753 o / 2: 814f6bd05178 'c'
754 754 |/
755 755 o 1: 02952614a83d 'd'
756 756 |
757 757 o 0: b173517d0057 'a'
758 758
759 759 $ hg rebase -b 5 -d tip
760 760 rebasing 3:ca58782ad1e4 "b"
761 761 rebasing 5:71cb43376053 "merge"
762 762 note: not rebasing 5:71cb43376053 "merge", its destination already has all its changes
763 763
764 764 $ cd ..
765 765
766 766 Test rebasing a commit with copy information
767 767
768 768 $ hg init rebase-rename
769 769 $ cd rebase-rename
770 770 $ echo a > a
771 771 $ hg ci -Aqm 'add a'
772 772 $ echo a2 > a
773 773 $ hg ci -m 'modify a'
774 774 $ hg co -q 0
775 775 $ hg mv a b
776 776 $ hg ci -qm 'rename a to b'
777 777 $ hg rebase -d 1
778 778 rebasing 2:b977edf6f839 "rename a to b" (tip)
779 779 merging a and b to b
780 780 saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/b977edf6f839-0864f570-rebase.hg
781 781 $ hg st --copies --change .
782 782 A b
783 783 a
784 784 R a
785 785 $ cd ..
786 786
787 787 Rebase across a copy with --collapse
788 788
789 789 $ hg init rebase-rename-collapse
790 790 $ cd rebase-rename-collapse
791 791 $ echo a > a
792 792 $ hg ci -Aqm 'add a'
793 793 $ hg mv a b
794 794 $ hg ci -m 'rename a to b'
795 795 $ hg co -q 0
796 796 $ echo a2 > a
797 797 $ hg ci -qm 'modify a'
798 BROKEN: obviously...
799 798 $ hg rebase -r . -d 1 --collapse
800 799 rebasing 2:41c4ea50d4cf "modify a" (tip)
801 800 merging b and a to b
802 abort: a@b977edf6f839: not found in manifest!
803 [255]
801 saved backup bundle to $TESTTMP/rebase-rename-collapse/.hg/strip-backup/41c4ea50d4cf-b90b7994-rebase.hg
804 802 $ cd ..
805 803
806 804 Test rebasing when the file we are merging in destination is empty
807 805
808 806 $ hg init test
809 807 $ cd test
810 808 $ echo a > foo
811 809 $ hg ci -Aqm 'added a to foo'
812 810
813 811 $ rm foo
814 812 $ touch foo
815 813 $ hg di
816 814 diff --git a/foo b/foo
817 815 --- a/foo
818 816 +++ b/foo
819 817 @@ -1,1 +0,0 @@
820 818 -a
821 819
822 820 $ hg ci -m "make foo an empty file"
823 821
824 822 $ hg up '.^'
825 823 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
826 824 $ echo b > foo
827 825 $ hg di
828 826 diff --git a/foo b/foo
829 827 --- a/foo
830 828 +++ b/foo
831 829 @@ -1,1 +1,1 @@
832 830 -a
833 831 +b
834 832 $ hg ci -m "add b to foo"
835 833 created new head
836 834
837 835 $ hg rebase -r . -d 1 --config ui.merge=internal:merge3
838 836 rebasing 2:fb62b706688e "add b to foo" (tip)
839 837 merging foo
840 838 hit merge conflicts; re-running rebase without in-memory merge
841 839 rebasing 2:fb62b706688e "add b to foo" (tip)
842 840 merging foo
843 841 warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
844 842 unresolved conflicts (see hg resolve, then hg rebase --continue)
845 843 [1]
General Comments 0
You need to be logged in to leave comments. Login now