##// END OF EJS Templates
workingfilectx: add backgroundclose as a kwarg to write()...
Phil Cohen -
r33085:1e79c66d default
parent child Browse files
Show More
@@ -1,2306 +1,2307
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 subrepo,
42 42 util,
43 43 )
44 44
# Shorthand for the lazily-computed, cached attribute decorator used
# throughout this module.
propertycache = util.propertycache

# Predicate: does a byte string contain anything outside printable ASCII?
# Used below to decide whether a binary node id should be hexlified before
# being embedded in an error message.
nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through unchanged lets callers write
        # repo[ctx] without allocating a fresh object.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        # Short (abbreviated hex) form of the node id.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal only when same concrete subclass and same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' tests manifest membership.
        return key in self._manifest

    def __getitem__(self, key):
        # 'ctx[path]' returns a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed subrepository state for this context.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for the given subrepo path in the substate.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # Human-readable phase name for this context's phase.
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything that is not public may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when there is only one parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Prefer whichever manifest data is already cached on this instance,
        # so a full manifest read is avoided when a delta (or nothing) will do.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Empty flags for paths that are not in the manifest.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # Build a matcher rooted at this repo, associated with this context.
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
383 383
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    # Non-'visible*' filters get the generic "not in subset" message.
    if not filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, filtername)
        return error.FilteredRepoLookupError(msg)
    # Visibility filtering means the revision is hidden; suggest --hidden.
    msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
396 396
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The lookup below is a cascade of progressively more expensive
        # interpretations of changeid; each successful branch sets _node and
        # _rev and returns.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # 20 bytes: treat as a binary node id.
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # Try as an integer revision number spelled out in the string.
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # 40 characters: treat as a full hex node id.
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unique hex-prefix match against the changelog.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Hexlify binary node ids so the error message stays readable.
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # Hash on the revision; fall back to identity before _rev is set.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # A single-element list when the second parent is null.
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # Lazily yield ancestor contexts.
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        # Lazily yield descendant contexts.
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
657 657
658 658 class basefilectx(object):
659 659 """A filecontext object represents the common logic for its children:
660 660 filectx: read-only access to a filerevision that is already present
661 661 in the repo,
662 662 workingfilectx: a filecontext that represents files from the working
663 663 directory,
664 664 memfilectx: a filecontext that represents files in-memory,
665 665 overlayfilectx: duplicate another filecontext with some fields overridden.
666 666 """
    @propertycache
    def _filelog(self):
        # Filelog backing this path, resolved from the repo on first access.
        return self._repo.file(self._path)
670 670
    @propertycache
    def _changeid(self):
        # Changelog revision this file revision is attached to, resolved from
        # whichever association the filectx was created with.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
683 683
    @propertycache
    def _filenode(self):
        # File node id, either looked up from an explicit fileid or taken
        # from the owning changectx's manifest.
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
690 690
691 691 @propertycache
692 692 def _filerev(self):
693 693 return self._filelog.rev(self._filenode)
694 694
695 695 @propertycache
696 696 def _repopath(self):
697 697 return self._path
698 698
    def __nonzero__(self):
        # A filectx is truthy iff its file node can be resolved.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__
708 708
709 709 def __bytes__(self):
710 710 try:
711 711 return "%s@%s" % (self.path(), self._changectx)
712 712 except error.LookupError:
713 713 return "%s@???" % self.path()
714 714
715 715 __str__ = encoding.strmethod(__bytes__)
716 716
717 717 def __repr__(self):
718 718 return "<%s %s>" % (type(self).__name__, str(self))
719 719
720 720 def __hash__(self):
721 721 try:
722 722 return hash((self._path, self._filenode))
723 723 except AttributeError:
724 724 return id(self)
725 725
726 726 def __eq__(self, other):
727 727 try:
728 728 return (type(self) == type(other) and self._path == other._path
729 729 and self._filenode == other._filenode)
730 730 except AttributeError:
731 731 return False
732 732
733 733 def __ne__(self, other):
734 734 return not (self == other)
735 735
736 736 def filerev(self):
737 737 return self._filerev
738 738 def filenode(self):
739 739 return self._filenode
740 740 @propertycache
741 741 def _flags(self):
742 742 return self._changectx.flags(self._path)
743 743 def flags(self):
744 744 return self._flags
745 745 def filelog(self):
746 746 return self._filelog
747 747 def rev(self):
748 748 return self._changeid
749 749 def linkrev(self):
750 750 return self._filelog.linkrev(self._filerev)
751 751 def node(self):
752 752 return self._changectx.node()
753 753 def hex(self):
754 754 return self._changectx.hex()
755 755 def user(self):
756 756 return self._changectx.user()
757 757 def date(self):
758 758 return self._changectx.date()
759 759 def files(self):
760 760 return self._changectx.files()
761 761 def description(self):
762 762 return self._changectx.description()
763 763 def branch(self):
764 764 return self._changectx.branch()
765 765 def extra(self):
766 766 return self._changectx.extra()
767 767 def phase(self):
768 768 return self._changectx.phase()
769 769 def phasestr(self):
770 770 return self._changectx.phasestr()
771 771 def manifest(self):
772 772 return self._changectx.manifest()
773 773 def changectx(self):
774 774 return self._changectx
775 775 def renamed(self):
776 776 return self._copied
777 777 def repo(self):
778 778 return self._repo
779 779 def size(self):
780 780 return len(self.data())
781 781
782 782 def path(self):
783 783 return self._path
784 784
785 785 def isbinary(self):
786 786 try:
787 787 return util.binary(self.data())
788 788 except IOError:
789 789 return False
790 790 def isexec(self):
791 791 return 'x' in self.flags()
792 792 def islink(self):
793 793 return 'l' in self.flags()
794 794
795 795 def isabsent(self):
796 796 """whether this filectx represents a file not in self._changectx
797 797
798 798 This is mainly for merge code to detect change/delete conflicts. This is
799 799 expected to be True for all subclasses of basectx."""
800 800 return False
801 801
    # Subclasses with their own comparison semantics set this to True so the
    # branch below defers to them.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only read and compare file data when the recorded sizes make
        # equality possible; a size mismatch already proves a difference.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
820 820
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
866 866
867 867 def introrev(self):
868 868 """return the rev of the changeset which introduced this file revision
869 869
870 870 This method is different from linkrev because it take into account the
871 871 changeset the filectx was created from. It ensures the returned
872 872 revision is one of its ancestors. This prevents bugs from
873 873 'linkrev-shadowing' when a file revision is used by multiple
874 874 changesets.
875 875 """
876 876 lkr = self.linkrev()
877 877 attrs = vars(self)
878 878 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
879 879 if noctx or self.rev() == lkr:
880 880 return self.linkrev()
881 881 return self._adjustlinkrev(self.rev(), inclusive=True)
882 882
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
898 898
    def parents(self):
        # Parent file contexts, with copy/rename metadata folded in so that a
        # renamed file's first parent is its rename source.
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
918 918
    def p1(self):
        # First parent file context.
        return self.parents()[0]
921 921
922 922 def p2(self):
923 923 p = self.parents()
924 924 if len(p) == 2:
925 925 return p[1]
926 926 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
927 927
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.

        follow - follow renames when walking parents
        skiprevs - revisions whose own changes should be blamed on their
                   parents instead (passed to _annotatepair as skipchild)
        diffopts - diff options forwarded to the block-matching code
        '''

        def lines(text):
            # number of lines in text; a non-empty text without a trailing
            # newline still counts its last (unterminated) line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> list of parent filectxs
        # needed: filectx -> number of children still waiting on its result
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                # release parent results that no other child still needs,
                # keeping peak memory bounded
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1040 1040
1041 1041 def ancestors(self, followfirst=False):
1042 1042 visit = {}
1043 1043 c = self
1044 1044 if followfirst:
1045 1045 cut = 1
1046 1046 else:
1047 1047 cut = None
1048 1048
1049 1049 while True:
1050 1050 for parent in c.parents()[:cut]:
1051 1051 visit[(parent.linkrev(), parent.filenode())] = parent
1052 1052 if not visit:
1053 1053 break
1054 1054 c = visit.pop(max(visit))
1055 1055 yield c
1056 1056
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # Diff each parent against the child once; blocks are lazy generators.
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only replace lines still attributed to the child
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        # clamp to the hunk's last parent line
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1166 1166
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be supplied;
        the others (and filelog) are stored only if given, so that the
        corresponding propertycaches can compute them lazily otherwise.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for self._changeid, computed lazily
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """return revision data as stored in the revlog (raw=True)"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return file data, honoring the censor.policy config for
        censored nodes ("ignore" yields an empty string, otherwise abort)"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as reported by the filelog
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # the rename was introduced in an earlier changeset; only report
        # it if neither parent of our changeset carries this file node
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1272 1272
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # text: commit message; user/date/changes are stored only when
        # given so the propertycaches below can supply defaults lazily
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        # first parent's id with a "+" suffix marking uncommitted changes
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default when no explicit 'changes' was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all files touched by this context, sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # an uncommitted context carries no tags
        return []

    def bookmarks(self):
        # union of all parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest when one has been built
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then every ancestor of the parents
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1469 1469
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
               or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping 'r' (removed) dirstate entries
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # '?' and 'r' dirstate states do not count as "in" the context
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        # the working directory is identified by the special wdirid
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop the second dirstate parent when it is null
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """schedule files for addition; returns the list of rejected files"""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add, for very large files
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # re-adding a file marked removed: restore normal tracking
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """stop tracking files; returns the list of rejected files"""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """restore removed files from a parent and mark them normal"""
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """record that dest is a copy of source in the dirstate"""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        # no filtering needed when the filesystem supports symlinks
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """compare possibly-clean files against the first parent

        Returns (modified, deleted, fixup) lists; 'fixup' contains files
        that turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and listclean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # mark added/modified files with their special node identifiers
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1805 1805
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always exists
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents

        def manifestnode(ctx):
            # node recorded for ``path`` in ctx's manifest (nullid if absent)
            return ctx._manifest.get(path, nullid)

        renamed = self.renamed()
        if renamed:
            # (sourcepath, sourcenode) from the copy record; no filelog
            entries = [renamed + (None,)]
        else:
            entries = [(path, manifestnode(parentctxs[0]), filelog)]

        for pctx in parentctxs[1:]:
            entries.append((path, manifestnode(pctx), filelog))

        # drop parents where the file does not exist (node == nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        return []
1852 1852
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Return the file's content as read from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source path, source node) if this file was copied,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changeset's date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite

        ``backgroundclose`` is passed through to repo.wwrite; presumably it
        allows the file handle to be closed on a background thread -- see
        the vfs layer for the exact semantics. Defaults to False so existing
        callers are unaffected.
        """
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1901 1902
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # intentionally skip workingctx.__init__ and go straight to its base
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            changed = self._changedset
            cleanfiles = [f for f in self._manifest if f not in changed]
        else:
            cleanfiles = []
        s = self._status
        return scmutil.status([f for f in s.modified if match(f)],
                              [f for f in s.added if match(f)],
                              [f for f in s.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
1939 1940
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: serve from the cache and only call through on a miss
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
1955 1956
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        renamed = fctx.renamed()
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        if renamed:
            renamed = renamed[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=renamed, memctx=memctx)

    return getfilectx
1974 1975
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # file removed by the patch
            return None
        return memfilectx(repo, path, data,
                          islink=mode[0], isexec=mode[1],
                          copied=copied, memctx=memctx)

    return getfilectx
1990 1991
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents are normalized to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no file parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned content for the file -> modified;
                # None/falsy (see filectx docstring) -> removed
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2113 2114
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # flags string: 'l' for symlink plus 'x' for executable
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite

        ``flags`` and ``backgroundclose`` are accepted for signature
        compatibility with workingfilectx.write() (which now takes a
        backgroundclose kwarg) but have no effect on an in-memory file;
        only the data is replaced.
        """
        self._data = data
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctxmatch/copiedmatch are deferred checks feeding the
            # "reusable" decision below
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            # keep data lazy: store the bound method, not its result
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazily computed (possibly overridden) file content
        return self._datafunc()
2217 2218
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # missing parents are normalized to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects compared against the
        # nullid bytes here; this relies on changectx equality semantics to
        # mean "parent is not the null revision" -- confirm against the
        # basectx comparison methods.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the manifest reused from the original changeset
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data comes straight from the original changeset
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,1752 +1,1751
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import shutil
14 14 import struct
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullhex,
23 23 nullid,
24 24 nullrev,
25 25 )
26 26 from . import (
27 27 copies,
28 28 error,
29 29 filemerge,
30 30 match as matchmod,
31 31 obsolete,
32 32 pycompat,
33 33 scmutil,
34 34 subrepo,
35 35 util,
36 36 worker,
37 37 )
38 38
39 39 _pack = struct.pack
40 40 _unpack = struct.unpack
41 41
42 42 def _droponode(data):
43 43 # used for compatibility for v1
44 44 bits = data.split('\0')
45 45 bits = bits[:-2] + bits[-1:]
46 46 return '\0'.join(bits)
47 47
48 48 class mergestate(object):
49 49 '''track 3-way merge state of individual files
50 50
51 51 The merge state is stored on disk when needed. Two files are used: one with
52 52 an old format (version 1), and one with a new format (version 2). Version 2
53 53 stores a superset of the data in version 1, including new kinds of records
54 54 in the future. For more about the new format, see the documentation for
55 55 `_readrecordsv2`.
56 56
57 57 Each record can contain arbitrary content, and has an associated type. This
58 58 `type` should be a letter. If `type` is uppercase, the record is mandatory:
59 59 versions of Mercurial that don't support it should abort. If `type` is
60 60 lowercase, the record can be safely ignored.
61 61
62 62 Currently known records:
63 63
64 64 L: the node of the "local" part of the merge (hexified version)
65 65 O: the node of the "other" part of the merge (hexified version)
66 66 F: a file to be merged entry
67 67 C: a change/delete or delete/change conflict
68 68 D: a file that the external merge driver will merge internally
69 69 (experimental)
70 70 m: the external merge driver defined for this merge plus its run state
71 71 (experimental)
72 72 f: a (filename, dictionary) tuple of optional values for a given file
73 73 X: unsupported mandatory record type (used in tests)
74 74 x: unsupported advisory record type (used in tests)
75 75 l: the labels for the parts of the merge.
76 76
77 77 Merge driver run states (experimental):
78 78 u: driver-resolved files unmarked -- needs to be run next time we're about
79 79 to resolve or commit
80 80 m: driver-resolved files marked -- only needs to be run before commit
81 81 s: success/skipped -- does not need to be run any more
82 82
83 83 '''
84 84 statepathv1 = 'merge/state'
85 85 statepathv2 = 'merge/state2'
86 86
87 87 @staticmethod
88 88 def clean(repo, node=None, other=None, labels=None):
89 89 """Initialize a brand new merge state, removing any existing state on
90 90 disk."""
91 91 ms = mergestate(repo)
92 92 ms.reset(node, other, labels)
93 93 return ms
94 94
95 95 @staticmethod
96 96 def read(repo):
97 97 """Initialize the merge state, reading it from disk."""
98 98 ms = mergestate(repo)
99 99 ms._read()
100 100 return ms
101 101
102 102 def __init__(self, repo):
103 103 """Initialize the merge state.
104 104
105 105 Do not use this directly! Instead call read() or clean()."""
106 106 self._repo = repo
107 107 self._dirty = False
108 108 self._labels = None
109 109
    def reset(self, node=None, other=None, labels=None):
        """Reset the merge state to a clean slate for a new merge.

        ``node``/``other`` are the local and other changeset nodes;
        ``labels`` are the side labels shown during conflict resolution.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        # drop values cached by @util.propertycache from a previous merge
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        # must be cleared before reading self.mergedriver, which consults it
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # remove any leftover per-file merge backups
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False
130 130
    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        # drop values cached by @util.propertycache from a previous merge
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                # node of the "local" changeset
                self._local = bin(record)
            elif rtype == 'O':
                # node of the "other" changeset
                self._other = bin(record)
            elif rtype == 'm':
                # merge driver name + its one-letter run state
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDC':
                # per-file merge entry: filename followed by its state fields
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                # optional per-file extras, stored as alternating key/value
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase record types are mandatory: collect and abort
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)
185 185
    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    # insert an empty "other node" field so the record has
                    # the same shape as a v2 'F' record
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records
219 219
220 220 def _v1v2match(self, v1records, v2records):
221 221 oldv2 = set() # old format version of v2 record
222 222 for rec in v2records:
223 223 if rec[0] == 'L':
224 224 oldv2.add(rec)
225 225 elif rec[0] == 'F':
226 226 # drop the onode data (not contained in v1)
227 227 oldv2.add(('F', _droponode(rec[1])))
228 228 for rec in v1records:
229 229 if rec not in oldv2:
230 230 return False
231 231 else:
232 232 return True
233 233
234 234 def _readrecordsv1(self):
235 235 """read on disk merge state for version 1 file
236 236
237 237 returns list of record [(TYPE, data), ...]
238 238
239 239 Note: the "F" data from this file are one entry short
240 240 (no "other file node" entry)
241 241 """
242 242 records = []
243 243 try:
244 244 f = self._repo.vfs(self.statepathv1)
245 245 for i, l in enumerate(f):
246 246 if i == 0:
247 247 records.append(('L', l[:-1]))
248 248 else:
249 249 records.append(('F', l[:-1]))
250 250 f.close()
251 251 except IOError as err:
252 252 if err.errno != errno.ENOENT:
253 253 raise
254 254 return records
255 255
    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                # one-byte record type
                rtype = data[off]
                off += 1
                # 4-byte big-endian payload length
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    # 't' wraps the real type in the first payload byte
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # a missing state file just means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records
295 295
    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        # _readmergedriver holds the driver name recorded in the on-disk
        # 'm' record (set by _read()); None when no merge state was loaded
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver
314 314
315 315 @util.propertycache
316 316 def localctx(self):
317 317 if self._local is None:
318 318 msg = "localctx accessed but self._local isn't set"
319 319 raise error.ProgrammingError(msg)
320 320 return self._repo[self._local]
321 321
322 322 @util.propertycache
323 323 def otherctx(self):
324 324 if self._other is None:
325 325 msg = "otherctx accessed but self._other isn't set"
326 326 raise error.ProgrammingError(msg)
327 327 return self._repo[self._other]
328 328
329 329 def active(self):
330 330 """Whether mergestate is active.
331 331
332 332 Returns True if there appears to be mergestate. This is a rough proxy
333 333 for "is a merge in progress."
334 334 """
335 335 # Check local variables before looking at filesystem for performance
336 336 # reasons.
337 337 return bool(self._local) or bool(self._state) or \
338 338 self._repo.vfs.exists(self.statepathv1) or \
339 339 self._repo.vfs.exists(self.statepathv2)
340 340
341 341 def commit(self):
342 342 """Write current state on disk (if necessary)"""
343 343 if self._dirty:
344 344 records = self._makerecords()
345 345 self._writerecords(records)
346 346 self._dirty = False
347 347
348 348 def _makerecords(self):
349 349 records = []
350 350 records.append(('L', hex(self._local)))
351 351 records.append(('O', hex(self._other)))
352 352 if self.mergedriver:
353 353 records.append(('m', '\0'.join([
354 354 self.mergedriver, self._mdstate])))
355 355 for d, v in self._state.iteritems():
356 356 if v[0] == 'd':
357 357 records.append(('D', '\0'.join([d] + v)))
358 358 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
359 359 # older versions of Mercurial
360 360 elif v[1] == nullhex or v[6] == nullhex:
361 361 records.append(('C', '\0'.join([d] + v)))
362 362 else:
363 363 records.append(('F', '\0'.join([d] + v)))
364 364 for filename, extras in sorted(self._stateextras.iteritems()):
365 365 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
366 366 extras.iteritems())
367 367 records.append(('f', '%s\0%s' % (filename, rawextras)))
368 368 if self._labels is not None:
369 369 labels = '\0'.join(self._labels)
370 370 records.append(('l', labels))
371 371 return records
372 372
373 373 def _writerecords(self, records):
374 374 """Write current state on disk (both v1 and v2)"""
375 375 self._writerecordsv1(records)
376 376 self._writerecordsv2(records)
377 377
378 378 def _writerecordsv1(self, records):
379 379 """Write current state on disk in a version 1 file"""
380 380 f = self._repo.vfs(self.statepathv1, 'w')
381 381 irecords = iter(records)
382 382 lrecords = next(irecords)
383 383 assert lrecords[0] == 'L'
384 384 f.write(hex(self._local) + '\n')
385 385 for rtype, data in irecords:
386 386 if rtype == 'F':
387 387 f.write('%s\n' % _droponode(data))
388 388 f.close()
389 389
390 390 def _writerecordsv2(self, records):
391 391 """Write current state on disk in a version 2 file
392 392
393 393 See the docstring for _readrecordsv2 for why we use 't'."""
394 394 # these are the records that all version 2 clients can read
395 395 whitelist = 'LOF'
396 396 f = self._repo.vfs(self.statepathv2, 'w')
397 397 for key, data in records:
398 398 assert len(key) == 1
399 399 if key not in whitelist:
400 400 key, data = 't', '%s%s' % (key, data)
401 401 format = '>sI%is' % len(data)
402 402 f.write(_pack(format, key, len(data), data))
403 403 f.close()
404 404
405 405 def add(self, fcl, fco, fca, fd):
406 406 """add a new (potentially?) conflicting file the merge state
407 407 fcl: file context for local,
408 408 fco: file context for remote,
409 409 fca: file context for ancestors,
410 410 fd: file path of the resulting merge.
411 411
412 412 note: also write the local version to the `.hg/merge` directory.
413 413 """
414 414 if fcl.isabsent():
415 415 hash = nullhex
416 416 else:
417 417 hash = hashlib.sha1(fcl.path()).hexdigest()
418 418 self._repo.vfs.write('merge/' + hash, fcl.data())
419 419 self._state[fd] = ['u', hash, fcl.path(),
420 420 fca.path(), hex(fca.filenode()),
421 421 fco.path(), hex(fco.filenode()),
422 422 fcl.flags()]
423 423 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
424 424 self._dirty = True
425 425
426 426 def __contains__(self, dfile):
427 427 return dfile in self._state
428 428
429 429 def __getitem__(self, dfile):
430 430 return self._state[dfile][0]
431 431
432 432 def __iter__(self):
433 433 return iter(sorted(self._state))
434 434
435 435 def files(self):
436 436 return self._state.keys()
437 437
438 438 def mark(self, dfile, state):
439 439 self._state[dfile][0] = state
440 440 self._dirty = True
441 441
442 442 def mdstate(self):
443 443 return self._mdstate
444 444
445 445 def unresolved(self):
446 446 """Obtain the paths of unresolved files."""
447 447
448 448 for f, entry in self._state.items():
449 449 if entry[0] == 'u':
450 450 yield f
451 451
452 452 def driverresolved(self):
453 453 """Obtain the paths of driver-resolved files."""
454 454
455 455 for f, entry in self._state.items():
456 456 if entry[0] == 'd':
457 457 yield f
458 458
459 459 def extras(self, filename):
460 460 return self._stateextras.setdefault(filename, {})
461 461
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        preresolve: True to run the premerge step (restore the stashed
        local version, attempt simple merges), False to run the merge tool
        proper.

        Returns (complete, exit code). Also updates self._state,
        self._stateextras and self._results as side effects.
        """
        # already resolved or driver-resolved: nothing to do
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        # merge the executable bit unless a symlink is involved
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                # the local version was stashed under .hg/merge/<hash> by add()
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, self._local,
                                                      lfile, fcd, fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, self._local,
                                                       lfile, fcd, fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            # exit code 0: merge succeeded
            self.mark(dfile, 'r')

        if complete:
            # record the dirstate action to apply for this file
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
536 536
537 537 def _filectxorabsent(self, hexnode, ctx, f):
538 538 if hexnode == nullhex:
539 539 return filemerge.absentfilectx(ctx, f)
540 540 else:
541 541 return ctx[f]
542 542
543 543 def preresolve(self, dfile, wctx):
544 544 """run premerge process for dfile
545 545
546 546 Returns whether the merge is complete, and the exit code."""
547 547 return self._resolve(True, dfile, wctx)
548 548
549 549 def resolve(self, dfile, wctx):
550 550 """run merge process (assuming premerge was run) for dfile
551 551
552 552 Returns the exit code of the merge."""
553 553 return self._resolve(False, dfile, wctx)[1]
554 554
555 555 def counts(self):
556 556 """return counts for updated, merged and removed files in this
557 557 session"""
558 558 updated, merged, removed = 0, 0, 0
559 559 for r, action in self._results.itervalues():
560 560 if r is None:
561 561 updated += 1
562 562 elif r == 0:
563 563 if action == 'r':
564 564 removed += 1
565 565 else:
566 566 merged += 1
567 567 return updated, merged, removed
568 568
569 569 def unresolvedcount(self):
570 570 """get unresolved count for this merge (persistent)"""
571 571 return len([True for f, entry in self._state.iteritems()
572 572 if entry[0] == 'u'])
573 573
574 574 def actions(self):
575 575 """return lists of actions to perform on the dirstate"""
576 576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
577 577 for f, (r, action) in self._results.iteritems():
578 578 if action is not None:
579 579 actions[action].append((f, None, "merge result"))
580 580 return actions
581 581
    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        # a non-null second parent means we are in the middle of a branch merge
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)
586 586
587 587 def queueremove(self, f):
588 588 """queues a file to be removed from the dirstate
589 589
590 590 Meant for use by custom merge drivers."""
591 591 self._results[f] = 0, 'r'
592 592
593 593 def queueadd(self, f):
594 594 """queues a file to be added to the dirstate
595 595
596 596 Meant for use by custom merge drivers."""
597 597 self._results[f] = 0, 'a'
598 598
599 599 def queueget(self, f):
600 600 """queues a file to be marked modified in the dirstate
601 601
602 602 Meant for use by custom merge drivers."""
603 603 self._results[f] = 0, 'g'
604 604
605 605 def _getcheckunknownconfig(repo, section, name):
606 606 config = repo.ui.config(section, name, default='abort')
607 607 valid = ['abort', 'ignore', 'warn']
608 608 if config not in valid:
609 609 validstr = ', '.join(["'" + v + "'" for v in valid])
610 610 raise error.ConfigError(_("%s.%s not valid "
611 611 "('%s' is none of %s)")
612 612 % (section, name, config, validstr))
613 613 return config
614 614
615 615 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
616 616 if f2 is None:
617 617 f2 = f
618 618 return (repo.wvfs.audit.check(f)
619 619 and repo.wvfs.isfileorlink(f)
620 620 and repo.dirstate.normalize(f) not in repo.dirstate
621 621 and mctx[f2].cmp(wctx[f]))
622 622
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates `actions` in place and may raise error.Abort depending on the
    merge.checkunknown / merge.checkignored configuration.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        # 'c'/'dc'/'dg' are the actions that would create a file where an
        # untracked one may already exist
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        # ignored and unknown conflicts are governed by separate options
        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    # NOTE(review): unreachable -- the branch above already
                    # matches config == 'abort' (see table note (1)); kept
                    # as-is to avoid changing merge behavior here.
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # turn remaining 'c' (create) actions into 'g' (get), backing up any
    # conflicting untracked file first
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
702 702
703 703 def _forgetremoved(wctx, mctx, branchmerge):
704 704 """
705 705 Forget removed files
706 706
707 707 If we're jumping between revisions (as opposed to merging), and if
708 708 neither the working directory nor the target rev has the file,
709 709 then we need to remove it from the dirstate, to prevent the
710 710 dirstate from listing the file when it is no longer in the
711 711 manifest.
712 712
713 713 If we're merging, and the other revision has removed a file
714 714 that is not present in the working directory, we need to mark it
715 715 as removed.
716 716 """
717 717
718 718 actions = {}
719 719 m = 'f'
720 720 if branchmerge:
721 721 m = 'r'
722 722 for f in wctx.deleted():
723 723 if f not in mctx:
724 724 actions[f] = m, None, "forget deleted"
725 725
726 726 if not branchmerge:
727 727 for f in wctx.removed():
728 728 if f not in mctx:
729 729 actions[f] = 'f', None, "forget removed"
730 730
731 731 return actions
732 732
def _checkcollision(repo, wmf, actions):
    """Abort if applying `actions` to manifest `wmf` would produce paths
    that collide under case folding (either two files, or a file and a
    directory).

    wmf: the working manifest; actions: merge action dict (may be falsy).
    Raises error.Abort on collision, returns None otherwise.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
775 775
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point.
    Always returns True (success) in the stub implementation."""
    return True
781 781
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point.
    Always returns True (success) in the stub implementation."""
    return True
787 787
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns (actions, diverge, renamedelete); actions maps each file to an
    (action type, args, message) tuple.
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink involved anywhere
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
964 964
965 965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 966 """Resolves false conflicts where the nodeid changed but the content
967 967 remained the same."""
968 968
969 969 for f, (m, args, msg) in actions.items():
970 970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 971 # local did change but ended up with same content
972 972 actions[f] = 'r', None, "prompt same"
973 973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 974 # remote did change but ended up with same content
975 975 del actions[f] # don't get = keep local deleted
976 976
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    "Calculate the actions needed to merge mctx into wctx using ancestors"
    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                # NOTE(review): bids.items()[0] relies on Python 2 dicts
                # returning a list from items(); under Python 3, dict views
                # are not subscriptable -- confirm before porting.
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working directory checkout: also forget files removed from the
        # target
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
1080 1080
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    actions: iterable of (file, args, message) removal actions.

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    audit = repo.wvfs.audit
    try:
        cwd = pycompat.getcwd()
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        # current directory is already gone (e.g. removed by another
        # process); note it so we can warn below
        cwd = None
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        # yield progress roughly every 100 files
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
    if cwd:
        # cwd was present before we started to remove files
        # let's check if it is present after we removed them
        try:
            pycompat.getcwd()
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # Print a warning if cwd was deleted
            repo.ui.warn(_("current directory was removed\n"
                           "(consider changing to repo root: %s)\n") %
                         repo.root)
1123 1123
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    actions: iterable of (file, (flags, backup), message) get actions.

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # close written files on a background thread; the context manager
    # waits for them all on exit
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # preserve a conflicting untracked file as <f>.orig
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise

            # a (non-symlink) directory is in the way: clear it out first
            if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
                repo.wvfs.removedirs(f)
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            # yield progress roughly every 100 files
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f
1161 1160
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    actions maps action codes (e.g. 'g' get, 'r' remove, 'm' merge,
    'cd'/'dc' change-delete conflicts, 'k' keep, 'e' exec-flag, ...) to
    lists of (file, args, msg) tuples.  overwrite is true for a clean
    (forced) update.  labels are the conflict-marker labels, if any.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    # sort in-place for a deterministic processing order
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    # register every conflicting file with the merge state up front, so an
    # interrupted merge can later be resumed from the saved state
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # hoist frequently-used attributes out of the loops below
    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            wctx[f].remove()

    # 'k' (keep) actions are no-ops, so they are excluded from the total
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # only the files the driver left unresolved still need a file merge
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        audit(f)
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            # the premerge did not settle it; the real merge below counts
            # as an extra progress step
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    # 's' == driver-resolved state already marked success; otherwise give
    # the merge driver a chance to conclude
    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    # close out the progress bar
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1385 1384
def recordupdates(repo, actions, branchmerge):
    """Record the effect of applied merge actions in the dirstate.

    ``actions`` maps action codes to lists of ``(file, args, msg)``
    tuples; ``branchmerge`` selects merge-style bookkeeping (two
    parents) versus plain checkout-style bookkeeping.
    """
    ds = repo.dirstate

    # removals have to be recorded before anything else
    for fname, _args, _msg in actions.get('r', []):
        if branchmerge:
            ds.remove(fname)
        else:
            ds.drop(fname)

    # forgotten files also must come before the rest
    for fname, _args, _msg in actions.get('f', []):
        ds.drop(fname)

    # files re-added to the manifest
    for fname, _args, _msg in actions.get('a', []):
        ds.add(fname)

    # re-added or marked as modified
    for fname, _args, _msg in actions.get('am', []):
        if branchmerge:
            ds.normallookup(fname)
        else:
            ds.add(fname)

    # exec-flag changes
    for fname, _args, _msg in actions.get('e', []):
        ds.normallookup(fname)

    # kept files need no dirstate work
    for fname, _args, _msg in actions.get('k', []):
        pass

    # files fetched from the other side
    for fname, _args, _msg in actions.get('g', []):
        if branchmerge:
            ds.otherparent(fname)
        else:
            ds.normal(fname)

    # three-way merged files
    for fname, margs, _msg in actions.get('m', []):
        f1, f2, fa, move, anc = margs
        if branchmerge:
            # mark as merged so the merger is properly recorded later
            ds.merge(fname)
            if f1 != f2: # copy/rename
                if move:
                    ds.remove(f1)
                if f1 != fname:
                    ds.copy(f1, fname)
                else:
                    ds.copy(f2, fname)
        else:
            # We've update-merged a locally modified file: pretend it was
            # checked out normally some time in the past so the merge
            # shows up as a plain local modification.
            if f2 == fname: # file not locally copied/moved
                ds.normallookup(fname)
            if move:
                ds.drop(f1)

    # directory rename: the local file moved
    for fname, margs, _msg in actions.get('dm', []):
        f0, flag = margs
        if branchmerge:
            ds.add(fname)
            ds.remove(f0)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
            ds.drop(f0)

    # directory rename: the incoming file lands under the moved name
    for fname, margs, _msg in actions.get('dg', []):
        f0, flag = margs
        if branchmerge:
            ds.add(fname)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first. The
    -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        # pick the merge ancestor(s) if the caller did not supply one
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.foreground.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies', True)
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        # with experimental.updatecheck=noconflict, bail out on anything
        # that is not a trivially-safe action
        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

    # fire the post-update hook outside the wlock
    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1714 1713
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # When grafting a descendant onto an ancestor, pass mergeancestor=True
    # to update(). That both allows the merge when the destination equals
    # ctx's parent (so graft can copy commits) and tells update() the
    # incoming changes are newer, suppressing "remote changed foo which
    # local deleted" prompts.
    isdescendant = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=isdescendant, labels=labels)

    otherparent = nullid
    ctxparents = ctx.parents()
    if keepparent and len(ctxparents) == 2 and pctx in ctxparents:
        ctxparents.remove(pctx)
        otherparent = ctxparents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), otherparent)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
General Comments 0
You need to be logged in to leave comments. Login now