# Mercurial `context.py` — excerpt rendered by a repository diff viewer.
# Changeset r25844:18541e95 (default branch) by Matt Mackall:
# "merge: make merge.preferancestor type and default consistent".
# Diff hunk @@ -1,1928 +1,1929 @@ — one line added; each code line below
# carries the viewer's old/new line-number prefixes.
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, wdirid, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand-in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 return iter(self._manifest)
70 70
71 71 def _manifestmatches(self, match, s):
72 72 """generate a new manifest filtered by the match argument
73 73
74 74 This method is for internal use only and mainly exists to provide an
75 75 object oriented way for other contexts to customize the manifest
76 76 generation.
77 77 """
78 78 return self.manifest().matches(match)
79 79
80 80 def _matchstatus(self, other, match):
81 81 """return match.always if match is none
82 82
83 83 This internal method provides a way for child objects to override the
84 84 match operator.
85 85 """
86 86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 87
def _buildstatus(self, other, s, match, listignored, listclean,
                 listunknown):
    """build a status with respect to another context"""
    # Read the older manifest first: the newer one is likely stored as
    # a delta against it, so loading in this order lets the manifest
    # cache turn the second read into a single delta application.
    if self.rev() is not None and self.rev() < other.rev():
        self.manifest()
    mf1 = other._manifestmatches(match, s)
    mf2 = self._manifestmatches(match, s)

    modified = []
    added = []
    removed = []
    clean = []
    deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
    deletedset = set(deleted)
    for fn, value in mf1.diff(mf2, clean=listclean).iteritems():
        if fn in deletedset:
            # deleted files are reported through 'deleted', not here
            continue
        if value is None:
            # a None entry only appears when listclean was requested
            clean.append(fn)
            continue
        (node1, _flag1), (node2, _flag2) = value
        if node1 is None:
            added.append(fn)
        elif node2 is None:
            removed.append(fn)
        elif node2 != _newnode:
            # The file was not a new file in mf2, so an entry
            # from diff is really a difference.
            modified.append(fn)
        elif self[fn].cmp(other[fn]):
            # node2 was newnode, but the working file doesn't
            # match the one in mf1.
            modified.append(fn)
        else:
            clean.append(fn)

    if removed:
        # need to filter files if they are already reported as removed
        unknown = [fn for fn in unknown if fn not in mf1]
        ignored = [fn for fn in ignored if fn not in mf1]
        # if they're deleted, don't report them as removed
        removed = [fn for fn in removed if fn not in deletedset]

    return scmutil.status(modified, added, removed, deleted, unknown,
                          ignored, clean)
139 139
140 140 @propertycache
141 141 def substate(self):
142 142 return subrepo.state(self, self._repo.ui)
143 143
144 144 def subrev(self, subpath):
145 145 return self.substate[subpath][1]
146 146
147 147 def rev(self):
148 148 return self._rev
149 149 def node(self):
150 150 return self._node
151 151 def hex(self):
152 152 return hex(self.node())
153 153 def manifest(self):
154 154 return self._manifest
155 155 def repo(self):
156 156 return self._repo
157 157 def phasestr(self):
158 158 return phases.phasenames[self.phase()]
159 159 def mutable(self):
160 160 return self.phase() > phases.public
161 161
162 162 def getfileset(self, expr):
163 163 return fileset.getfileset(self, expr)
164 164
165 165 def obsolete(self):
166 166 """True if the changeset is obsolete"""
167 167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168 168
169 169 def extinct(self):
170 170 """True if the changeset is extinct"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172 172
173 173 def unstable(self):
174 174 """True if the changeset is not obsolete but it's ancestor are"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176 176
177 177 def bumped(self):
178 178 """True if the changeset try to be a successor of a public changeset
179 179
180 180 Only non-public and non-obsolete changesets may be bumped.
181 181 """
182 182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183 183
184 184 def divergent(self):
185 185 """Is a successors of a changeset with multiple possible successors set
186 186
187 187 Only non-public and non-obsolete changesets may be divergent.
188 188 """
189 189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190 190
191 191 def troubled(self):
192 192 """True if the changeset is either unstable, bumped or divergent"""
193 193 return self.unstable() or self.bumped() or self.divergent()
194 194
def troubles(self):
    """return the list of troubles affecting this changesets.

    Troubles are returned as strings. possible values are:
    - unstable,
    - bumped,
    - divergent.
    """
    # probe each trouble category in a fixed order
    checks = (('unstable', self.unstable),
              ('bumped', self.bumped),
              ('divergent', self.divergent))
    troubles = []
    for name, present in checks:
        if present():
            troubles.append(name)
    return troubles
211 211
212 212 def parents(self):
213 213 """return contexts for each parent changeset"""
214 214 return self._parents
215 215
216 216 def p1(self):
217 217 return self._parents[0]
218 218
219 219 def p2(self):
220 220 if len(self._parents) == 2:
221 221 return self._parents[1]
222 222 return changectx(self._repo, -1)
223 223
def _fileinfo(self, path):
    """Return (filenode, flags) for *path* in this changeset.

    Raises ManifestLookupError when the path is not in the manifest.
    """
    # Fast path: a fully-loaded manifest is already cached on self.
    if '_manifest' in self.__dict__:
        try:
            return self._manifest[path], self._manifest.flags(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))
    # Cheaper path: consult the manifest delta when it is already loaded
    # or when this changeset touched the file (so the delta will have it).
    if '_manifestdelta' in self.__dict__ or path in self.files():
        if path in self._manifestdelta:
            return (self._manifestdelta[path],
                    self._manifestdelta.flags(path))
    # Slow path: search the full manifest revlog for this changeset.
    node, flag = self._repo.manifest.find(self._changeset[0], path)
    if not node:
        raise error.ManifestLookupError(self._node, path,
                                        _('not found in manifest'))

    return node, flag
241 241
242 242 def filenode(self, path):
243 243 return self._fileinfo(path)[0]
244 244
245 245 def flags(self, path):
246 246 try:
247 247 return self._fileinfo(path)[1]
248 248 except error.LookupError:
249 249 return ''
250 250
251 251 def sub(self, path):
252 252 '''return a subrepo for the stored revision of path, never wdir()'''
253 253 return subrepo.subrepo(self, path)
254 254
255 255 def nullsub(self, path, pctx):
256 256 return subrepo.nullsubrepo(self, path, pctx)
257 257
258 258 def workingsub(self, path):
259 259 '''return a subrepo for the stored revision, or wdir if this is a wdir
260 260 context.
261 261 '''
262 262 return subrepo.subrepo(self, path, allowwdir=True)
263 263
264 264 def match(self, pats=[], include=None, exclude=None, default='glob',
265 265 listsubrepos=False, badfn=None):
266 266 r = self._repo
267 267 return matchmod.match(r.root, r.getcwd(), pats,
268 268 include, exclude, default,
269 269 auditor=r.auditor, ctx=self,
270 270 listsubrepos=listsubrepos, badfn=badfn)
271 271
272 272 def diff(self, ctx2=None, match=None, **opts):
273 273 """Returns a diff generator for the given contexts and matcher"""
274 274 if ctx2 is None:
275 275 ctx2 = self.p1()
276 276 if ctx2 is not None:
277 277 ctx2 = self._repo[ctx2]
278 278 diffopts = patch.diffopts(self._repo.ui, opts)
279 279 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
280 280
281 281 def dirs(self):
282 282 return self._manifest.dirs()
283 283
284 284 def hasdir(self, dir):
285 285 return self._manifest.hasdir(dir)
286 286
287 287 def dirty(self, missing=False, merge=True, branch=True):
288 288 return False
289 289
def status(self, other=None, match=None, listignored=False,
           listclean=False, listunknown=False, listsubrepos=False):
    """return status of files between two nodes or node and working
    directory.

    If other is None, compare this node with working directory.

    returns (modified, added, removed, deleted, unknown, ignored, clean)
    """

    ctx1 = self
    ctx2 = self._repo[other]

    # This next code block is, admittedly, fragile logic that tests for
    # reversing the contexts and wouldn't need to exist if it weren't for
    # the fast (and common) code path of comparing the working directory
    # with its first parent.
    #
    # What we're aiming for here is the ability to call:
    #
    # workingctx.status(parentctx)
    #
    # If we always built the manifest for each context and compared those,
    # then we'd be done. But the special case of the above call means we
    # just copy the manifest of the parent.
    #
    # 'swapped' (renamed from 'reversed', which shadowed the builtin)
    # records that the comparison was performed in the opposite direction.
    swapped = False
    if (not isinstance(ctx1, changectx)
        and isinstance(ctx2, changectx)):
        swapped = True
        ctx1, ctx2 = ctx2, ctx1

    match = ctx2._matchstatus(ctx1, match)
    r = scmutil.status([], [], [], [], [], [], [])
    r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                          listunknown)

    if swapped:
        # Reverse added and removed. Clear deleted, unknown and ignored as
        # these make no sense to reverse.
        r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                           r.clean)

    if listsubrepos:
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            rev2 = ctx2.subrev(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
            except error.LookupError:
                self._repo.ui.status(_("skipping missing "
                                       "subrepository: %s\n") % subpath)

    for l in r:
        l.sort()

    return r
350 350
351 351
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build a memctx whose file contents are served by *store*.

    *store* must provide getfile(path) -> (data, (islink, isexec),
    copied); a None data value marks the file as removed.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {} if extra is None else extra
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
368 368
369 369 class changectx(basectx):
370 370 """A changecontext object makes access to data related to a particular
371 371 changeset convenient. It represents a read-only context already present in
372 372 the repo."""
def __init__(self, repo, changeid=''):
    """changeid is a revision number, node, or tag"""

    # since basectx.__new__ already took care of copying the object, we
    # don't need to do anything in __init__, so we just exit here
    if isinstance(changeid, basectx):
        return

    if changeid == '':
        changeid = '.'
    self._repo = repo

    # The cascade below tries progressively more expensive ways to
    # resolve 'changeid' into (self._node, self._rev); each successful
    # branch returns immediately.
    try:
        if isinstance(changeid, int):
            # a revision number
            self._node = repo.changelog.node(changeid)
            self._rev = changeid
            return
        if isinstance(changeid, long):
            changeid = str(changeid)
        if changeid == 'null':
            self._node = nullid
            self._rev = nullrev
            return
        if changeid == 'tip':
            self._node = repo.changelog.tip()
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid == '.' or changeid == repo.dirstate.p1():
            # this is a hack to delay/avoid loading obsmarkers
            # when we know that '.' won't be hidden
            self._node = repo.dirstate.p1()
            self._rev = repo.unfiltered().changelog.rev(self._node)
            return
        if len(changeid) == 20:
            # a 20-byte string is taken to be a binary nodeid
            try:
                self._node = changeid
                self._rev = repo.changelog.rev(changeid)
                return
            except error.FilteredRepoLookupError:
                raise
            except LookupError:
                pass

        try:
            # a decimal revision number, possibly negative (from the end)
            r = int(changeid)
            if str(r) != changeid:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l:
                raise ValueError
            self._rev = r
            self._node = repo.changelog.node(r)
            return
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(changeid) == 40:
            # a full 40-character hex nodeid
            try:
                self._node = bin(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # lookup bookmarks through the name interface
        try:
            self._node = repo.names.singlenode(repo, changeid)
            self._rev = repo.changelog.rev(self._node)
            return
        except KeyError:
            pass
        except error.FilteredRepoLookupError:
            raise
        except error.RepoLookupError:
            pass

        # last resort: a unique hex-nodeid prefix
        self._node = repo.unfiltered().changelog._partialmatch(changeid)
        if self._node is not None:
            self._rev = repo.changelog.rev(self._node)
            return

        # lookup failed
        # check if it might have come from damaged dirstate
        #
        # XXX we could avoid the unfiltered if we had a recognizable
        # exception for filtered changeset access
        if changeid in repo.unfiltered().dirstate.parents():
            msg = _("working directory has unknown parent '%s'!")
            raise error.Abort(msg % short(changeid))
        try:
            # convert a binary nodeid to hex for the error message below
            if len(changeid) == 20:
                changeid = hex(changeid)
        except TypeError:
            pass
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # the revision exists but is filtered out of this repoview
        if repo.filtername.startswith('visible'):
            msg = _("hidden revision '%s'") % changeid
            hint = _('use --hidden to access hidden revisions')
            raise error.FilteredRepoLookupError(msg, hint=hint)
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        raise error.FilteredRepoLookupError(msg)
    except IndexError:
        pass
    raise error.RepoLookupError(
        _("unknown revision '%s'") % changeid)
486 486
487 487 def __hash__(self):
488 488 try:
489 489 return hash(self._rev)
490 490 except AttributeError:
491 491 return id(self)
492 492
493 493 def __nonzero__(self):
494 494 return self._rev != nullrev
495 495
496 496 @propertycache
497 497 def _changeset(self):
498 498 return self._repo.changelog.read(self.rev())
499 499
500 500 @propertycache
501 501 def _manifest(self):
502 502 return self._repo.manifest.read(self._changeset[0])
503 503
504 504 @propertycache
505 505 def _manifestdelta(self):
506 506 return self._repo.manifest.readdelta(self._changeset[0])
507 507
508 508 @propertycache
509 509 def _parents(self):
510 510 p = self._repo.changelog.parentrevs(self._rev)
511 511 if p[1] == nullrev:
512 512 p = p[:-1]
513 513 return [changectx(self._repo, x) for x in p]
514 514
515 515 def changeset(self):
516 516 return self._changeset
517 517 def manifestnode(self):
518 518 return self._changeset[0]
519 519
520 520 def user(self):
521 521 return self._changeset[1]
522 522 def date(self):
523 523 return self._changeset[2]
524 524 def files(self):
525 525 return self._changeset[3]
526 526 def description(self):
527 527 return self._changeset[4]
528 528 def branch(self):
529 529 return encoding.tolocal(self._changeset[5].get("branch"))
530 530 def closesbranch(self):
531 531 return 'close' in self._changeset[5]
532 532 def extra(self):
533 533 return self._changeset[5]
534 534 def tags(self):
535 535 return self._repo.nodetags(self._node)
536 536 def bookmarks(self):
537 537 return self._repo.nodebookmarks(self._node)
538 538 def phase(self):
539 539 return self._repo._phasecache.phase(self._repo, self._rev)
540 540 def hidden(self):
541 541 return self._rev in repoview.filterrevs(self._repo, 'visible')
542 542
543 543 def children(self):
544 544 """return contexts for each child changeset"""
545 545 c = self._repo.changelog.children(self._node)
546 546 return [changectx(self._repo, x) for x in c]
547 547
548 548 def ancestors(self):
549 549 for a in self._repo.changelog.ancestors([self._rev]):
550 550 yield changectx(self._repo, a)
551 551
552 552 def descendants(self):
553 553 for d in self._repo.changelog.descendants([self._rev]):
554 554 yield changectx(self._repo, d)
555 555
556 556 def filectx(self, path, fileid=None, filelog=None):
557 557 """get a file context from this changeset"""
558 558 if fileid is None:
559 559 fileid = self.filenode(path)
560 560 return filectx(self._repo, path, fileid=fileid,
561 561 changectx=self, filelog=filelog)
562 562
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor.

    Note: the diff rendering of this hunk contained both the old and the
    new form of the configlist() call; only the post-change form (with
    the ['*'] default) is kept here, matching the commit's intent of
    making the merge.preferancestor type and default consistent.
    """
    # deal with workingctxs
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        anc = nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # experimental config: merge.preferancestor
        for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
            try:
                ctx = changectx(self._repo, r)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        else:
            # no configured preference matched; use the revlog ancestor
            anc = self._repo.changelog.ancestor(self._node, n2)
        if warn:
            self._repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(anc), short(self._node), short(n2))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(cahs) if n != anc))
    return changectx(self._repo, anc)
597 598
598 599 def descendant(self, other):
599 600 """True if other is descendant of this changeset"""
600 601 return self._repo.changelog.descendant(self._rev, other._rev)
601 602
602 603 def walk(self, match):
603 604 '''Generates matching file names.'''
604 605
605 606 # Wrap match.bad method to have message with nodeid
606 607 def bad(fn, msg):
607 608 # The manifest doesn't know about subrepos, so don't complain about
608 609 # paths into valid subrepos.
609 610 if any(fn == s or fn.startswith(s + '/')
610 611 for s in self.substate):
611 612 return
612 613 match.bad(fn, _('no such file in rev %s') % self)
613 614
614 615 m = matchmod.badmatch(match, bad)
615 616 return self._manifest.walk(m)
616 617
617 618 def matches(self, match):
618 619 return self.walk(match)
619 620
620 621 class basefilectx(object):
621 622 """A filecontext object represents the common logic for its children:
622 623 filectx: read-only access to a filerevision that is already present
623 624 in the repo,
624 625 workingfilectx: a filecontext that represents files from the working
625 626 directory,
626 627 memfilectx: a filecontext that represents files in-memory."""
627 628 def __new__(cls, repo, path, *args, **kwargs):
628 629 return super(basefilectx, cls).__new__(cls)
629 630
630 631 @propertycache
631 632 def _filelog(self):
632 633 return self._repo.file(self._path)
633 634
634 635 @propertycache
635 636 def _changeid(self):
636 637 if '_changeid' in self.__dict__:
637 638 return self._changeid
638 639 elif '_changectx' in self.__dict__:
639 640 return self._changectx.rev()
640 641 elif '_descendantrev' in self.__dict__:
641 642 # this file context was created from a revision with a known
642 643 # descendant, we can (lazily) correct for linkrev aliases
643 644 return self._adjustlinkrev(self._path, self._filelog,
644 645 self._filenode, self._descendantrev)
645 646 else:
646 647 return self._filelog.linkrev(self._filerev)
647 648
648 649 @propertycache
649 650 def _filenode(self):
650 651 if '_fileid' in self.__dict__:
651 652 return self._filelog.lookup(self._fileid)
652 653 else:
653 654 return self._changectx.filenode(self._path)
654 655
655 656 @propertycache
656 657 def _filerev(self):
657 658 return self._filelog.rev(self._filenode)
658 659
659 660 @propertycache
660 661 def _repopath(self):
661 662 return self._path
662 663
663 664 def __nonzero__(self):
664 665 try:
665 666 self._filenode
666 667 return True
667 668 except error.LookupError:
668 669 # file is missing
669 670 return False
670 671
671 672 def __str__(self):
672 673 return "%s@%s" % (self.path(), self._changectx)
673 674
674 675 def __repr__(self):
675 676 return "<%s %s>" % (type(self).__name__, str(self))
676 677
677 678 def __hash__(self):
678 679 try:
679 680 return hash((self._path, self._filenode))
680 681 except AttributeError:
681 682 return id(self)
682 683
683 684 def __eq__(self, other):
684 685 try:
685 686 return (type(self) == type(other) and self._path == other._path
686 687 and self._filenode == other._filenode)
687 688 except AttributeError:
688 689 return False
689 690
690 691 def __ne__(self, other):
691 692 return not (self == other)
692 693
693 694 def filerev(self):
694 695 return self._filerev
695 696 def filenode(self):
696 697 return self._filenode
697 698 def flags(self):
698 699 return self._changectx.flags(self._path)
699 700 def filelog(self):
700 701 return self._filelog
701 702 def rev(self):
702 703 return self._changeid
703 704 def linkrev(self):
704 705 return self._filelog.linkrev(self._filerev)
705 706 def node(self):
706 707 return self._changectx.node()
707 708 def hex(self):
708 709 return self._changectx.hex()
709 710 def user(self):
710 711 return self._changectx.user()
711 712 def date(self):
712 713 return self._changectx.date()
713 714 def files(self):
714 715 return self._changectx.files()
715 716 def description(self):
716 717 return self._changectx.description()
717 718 def branch(self):
718 719 return self._changectx.branch()
719 720 def extra(self):
720 721 return self._changectx.extra()
721 722 def phase(self):
722 723 return self._changectx.phase()
723 724 def phasestr(self):
724 725 return self._changectx.phasestr()
725 726 def manifest(self):
726 727 return self._changectx.manifest()
727 728 def changectx(self):
728 729 return self._changectx
729 730 def repo(self):
730 731 return self._repo
731 732
732 733 def path(self):
733 734 return self._path
734 735
735 736 def isbinary(self):
736 737 try:
737 738 return util.binary(self.data())
738 739 except IOError:
739 740 return False
740 741 def isexec(self):
741 742 return 'x' in self.flags()
742 743 def islink(self):
743 744 return 'l' in self.flags()
744 745
def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    # Only run the (possibly expensive) filelog content comparison when
    # the sizes could plausibly match; otherwise report a difference
    # immediately. The order of these short-circuit tests matters.
    if (fctx._filerev is None
        and (self._repo._encodefilterpats
             # if file data starts with '\1\n', empty metadata block is
             # prepended, which adds 4 bytes to filelog.size().
             or self.size() - 4 == fctx.size())
        or self.size() == fctx.size()):
        return self._filelog.cmp(self._filenode, fctx.data())

    return True
759 760
760 761 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
761 762 """return the first ancestor of <srcrev> introducing <fnode>
762 763
763 764 If the linkrev of the file revision does not point to an ancestor of
764 765 srcrev, we'll walk down the ancestors until we find one introducing
765 766 this file revision.
766 767
767 768 :repo: a localrepository object (used to access changelog and manifest)
768 769 :path: the file path
769 770 :fnode: the nodeid of the file revision
770 771 :filelog: the filelog of this path
771 772 :srcrev: the changeset revision we search ancestors from
772 773 :inclusive: if true, the src revision will also be checked
773 774 """
774 775 repo = self._repo
775 776 cl = repo.unfiltered().changelog
776 777 ma = repo.manifest
777 778 # fetch the linkrev
778 779 fr = filelog.rev(fnode)
779 780 lkr = filelog.linkrev(fr)
780 781 # hack to reuse ancestor computation when searching for renames
781 782 memberanc = getattr(self, '_ancestrycontext', None)
782 783 iteranc = None
783 784 if srcrev is None:
784 785 # wctx case, used by workingfilectx during mergecopy
785 786 revs = [p.rev() for p in self._repo[None].parents()]
786 787 inclusive = True # we skipped the real (revless) source
787 788 else:
788 789 revs = [srcrev]
789 790 if memberanc is None:
790 791 memberanc = iteranc = cl.ancestors(revs, lkr,
791 792 inclusive=inclusive)
792 793 # check if this linkrev is an ancestor of srcrev
793 794 if lkr not in memberanc:
794 795 if iteranc is None:
795 796 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
796 797 for a in iteranc:
797 798 ac = cl.read(a) # get changeset data (we avoid object creation)
798 799 if path in ac[3]: # checking the 'files' field.
799 800 # The file has been touched, check if the content is
800 801 # similar to the one we search for.
801 802 if fnode == ma.readfast(ac[0]).get(path):
802 803 return a
803 804 # In theory, we should never get out of that loop without a result.
804 805 # But if manifest uses a buggy file revision (not children of the
805 806 # one it replaces) we could. Such a buggy situation will likely
806 807 # result is crash somewhere else at to some point.
807 808 return lkr
808 809
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it takes into account
    the changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    lkr = self.linkrev()
    attrs = vars(self)
    hasctx = '_changeid' in attrs or '_changectx' in attrs
    if not hasctx or self.rev() == lkr:
        # no context to validate against, or the linkrev already points
        # at the right changeset
        return self.linkrev()
    return self._adjustlinkrev(self._path, self._filelog, self._filenode,
                               self.rev(), inclusive=True)
825 826
826 827 def _parentfilectx(self, path, fileid, filelog):
827 828 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
828 829 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
829 830 if '_changeid' in vars(self) or '_changectx' in vars(self):
830 831 # If self is associated with a changeset (probably explicitly
831 832 # fed), ensure the created filectx is associated with a
832 833 # changeset that is an ancestor of self.changectx.
833 834 # This lets us later use _adjustlinkrev to get a correct link.
834 835 fctx._descendantrev = self.rev()
835 836 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
836 837 elif '_descendantrev' in vars(self):
837 838 # Otherwise propagate _descendantrev if we have one associated.
838 839 fctx._descendantrev = self._descendantrev
839 840 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
840 841 return fctx
841 842
def parents(self):
    """Return filectx objects for this file revision's parents."""
    path = self._path
    fl = self._filelog
    # drop nullid parents up front; only real parents are kept
    entries = [(path, node, fl)
               for node in fl.parents(self._filenode) if node != nullid]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        # be replaced with the rename information. This parent is -always-
        # the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        entries.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(p, n, l) for p, n, l in entries]
861 862
862 863 def p1(self):
863 864 return self.parents()[0]
864 865
865 866 def p2(self):
866 867 p = self.parents()
867 868 if len(p) == 2:
868 869 return p[1]
869 870 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
870 871
871 872 def annotate(self, follow=False, linenumber=None, diffopts=None):
872 873 '''returns a list of tuples of (ctx, line) for each line
873 874 in the file, where ctx is the filectx of the node where
874 875 that line was last changed.
875 876 This returns tuples of ((ctx, linenumber), line) for each line,
876 877 if "linenumber" parameter is NOT "None".
877 878 In such tuples, linenumber means one at the first appearance
878 879 in the managed file.
879 880 To reduce annotation cost,
880 881 this returns fixed value(False is used) as linenumber,
881 882 if "linenumber" parameter is "False".'''
882 883
883 884 if linenumber is None:
884 885 def decorate(text, rev):
885 886 return ([rev] * len(text.splitlines()), text)
886 887 elif linenumber:
887 888 def decorate(text, rev):
888 889 size = len(text.splitlines())
889 890 return ([(rev, i) for i in xrange(1, size + 1)], text)
890 891 else:
891 892 def decorate(text, rev):
892 893 return ([(rev, False)] * len(text.splitlines()), text)
893 894
894 895 def pair(parent, child):
895 896 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
896 897 refine=True)
897 898 for (a1, a2, b1, b2), t in blocks:
898 899 # Changed blocks ('!') or blocks made only of blank lines ('~')
899 900 # belong to the child.
900 901 if t == '=':
901 902 child[0][b1:b2] = parent[0][a1:a2]
902 903 return child
903 904
904 905 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
905 906
906 907 def parents(f):
907 908 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
908 909 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
909 910 # from the topmost introrev (= srcrev) down to p.linkrev() if it
910 911 # isn't an ancestor of the srcrev.
911 912 f._changeid
912 913 pl = f.parents()
913 914
914 915 # Don't return renamed parents if we aren't following.
915 916 if not follow:
916 917 pl = [p for p in pl if p.path() == f.path()]
917 918
918 919 # renamed filectx won't have a filelog yet, so set it
919 920 # from the cache to save time
920 921 for p in pl:
921 922 if not '_filelog' in p.__dict__:
922 923 p._filelog = getlog(p.path())
923 924
924 925 return pl
925 926
926 927 # use linkrev to find the first changeset where self appeared
927 928 base = self
928 929 introrev = self.introrev()
929 930 if self.rev() != introrev:
930 931 base = self.filectx(self.filenode(), changeid=introrev)
931 932 if getattr(base, '_ancestrycontext', None) is None:
932 933 cl = self._repo.changelog
933 934 if introrev is None:
934 935 # wctx is not inclusive, but works because _ancestrycontext
935 936 # is used to test filelog revisions
936 937 ac = cl.ancestors([p.rev() for p in base.parents()],
937 938 inclusive=True)
938 939 else:
939 940 ac = cl.ancestors([introrev], inclusive=True)
940 941 base._ancestrycontext = ac
941 942
942 943 # This algorithm would prefer to be recursive, but Python is a
943 944 # bit recursion-hostile. Instead we do an iterative
944 945 # depth-first search.
945 946
946 947 visit = [base]
947 948 hist = {}
948 949 pcache = {}
949 950 needed = {base: 1}
950 951 while visit:
951 952 f = visit[-1]
952 953 pcached = f in pcache
953 954 if not pcached:
954 955 pcache[f] = parents(f)
955 956
956 957 ready = True
957 958 pl = pcache[f]
958 959 for p in pl:
959 960 if p not in hist:
960 961 ready = False
961 962 visit.append(p)
962 963 if not pcached:
963 964 needed[p] = needed.get(p, 0) + 1
964 965 if ready:
965 966 visit.pop()
966 967 reusable = f in hist
967 968 if reusable:
968 969 curr = hist[f]
969 970 else:
970 971 curr = decorate(f.data(), f)
971 972 for p in pl:
972 973 if not reusable:
973 974 curr = pair(hist[p], curr)
974 975 if needed[p] == 1:
975 976 del hist[p]
976 977 del needed[p]
977 978 else:
978 979 needed[p] -= 1
979 980
980 981 hist[f] = curr
981 982 pcache[f] = []
982 983
983 984 return zip(hist[base][0], hist[base][1].splitlines(True))
984 985
    def ancestors(self, followfirst=False):
        """Yield ancestor file contexts of this filectx.

        Candidates are kept in ``visit`` keyed by (linkrev, filenode) and
        popped via ``max(visit)``, so ancestors are yielded in descending
        linkrev order. With followfirst=True only first parents are
        followed.
        """
        visit = {}
        c = self
        if followfirst:
            # only walk the first parent of each revision
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
1000 1001
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be supplied;
        filelog may be passed to avoid re-opening the file's revlog.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only set the attributes we were given; the rest are computed
        # lazily by propertycaches on basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the repository.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        # raw file data; a censored node either yields "" or aborts,
        # depending on the censor.policy config
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # the filelog records a rename but this changeset merely inherits
        # the file revision: suppress the copy if either parent already
        # has this exact file node
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1098 1099
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status are only set when supplied; otherwise the
        # propertycaches below compute defaults lazily
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # e.g. "1a2b3c4d5e6f+": first parent's short hash plus a marker
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                # copy sources supply the base nodeid for renamed files
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        # default: full repository status relative to the parent
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revisions
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # the least-public phase among the parents, never below draft
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest when it has already been built
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then all their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write()
1323 1324
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked means neither unknown ('?') nor removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop a null second parent so non-merge states have one parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule files for addition; return the list of rejected files."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add: large files are expensive to diff
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified or normal
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        """Stop tracking files without deleting them; return rejects."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # file was only scheduled for addition: just drop it
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        """Restore removed files from a parent revision."""
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # NOTE(review): and/or pseudo-ternary; relies on
                    # pctxs[0][f] being truthy — confirm filectx has no
                    # falsy instances before restyling
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        """Record that dest is a copy of source in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    # NOTE(review): mutable default pats=[] is benign here (never mutated),
    # but a None default would be safer
    def match(self, pats=[], include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        if not util.checkcase(r.root):
            return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                           exclude, default, r.auditor, self,
                                           listsubrepos=listsubrepos,
                                           badfn=badfn)
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # below from doing so for subsequent changing files
                    self._repo.dirstate.write()
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            # _newnode marks entries dirty in the working copy
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1623 1624
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no committed revision backs this context yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid when the path is absent from the manifest
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # rename source replaces the first-parent entry
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # nullid entries mean "no parent" and are filtered out
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1668 1669
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read from the working directory, not from the filelog
        return self._repo.wread(self._path)
    def renamed(self):
        # (source path, source filenode) per the dirstate copy record,
        # or None when no copy is recorded
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # prefer the on-disk mtime; fall back to the changectx date if
        # the file has vanished from the working directory
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1714 1715
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately invokes committablectx.__init__ (skipping
        # workingctx.__init__) with the supplied 'changes' status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # everything in the manifest that isn't part of this commit
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1752 1753
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # no rev/node yet: they only exist once the context is committed
        self._rev = None
        self._node = None
        # None parents are normalized to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                # filectxfn is used as a mapping of path -> filectx here
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        if extra:
            # copy so later mutation (branch default below) can't leak
            # back into the caller's dict
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            # hash current content against the file's parent nodes,
            # mirroring what the filelog would store
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            # self[f] comes from filectxfn; a None/falsy result marks the
            # file as removed (see filectx docstring above)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1890 1891
1891 1892 class memfilectx(committablefilectx):
1892 1893 """memfilectx represents an in-memory file to commit.
1893 1894
1894 1895 See memctx and committablefilectx for more details.
1895 1896 """
def __init__(self, repo, path, data, islink=False,
             isexec=False, copied=None, memctx=None):
    """
    path is the normalized file path relative to repository root.
    data is the file content as a string.
    islink is True if the file is a symbolic link.
    isexec is True if the file is executable.
    copied is the source file path if current file was copied in the
    revision being committed, or None."""
    super(memfilectx, self).__init__(repo, path, None, memctx)
    self._data = data
    # encode the file mode as manifest-style flag characters
    flags = ''
    if islink:
        flags += 'l'
    if isexec:
        flags += 'x'
    self._flags = flags
    if copied:
        # only the source path is tracked; the node side is always null
        self._copied = (copied, nullid)
    else:
        self._copied = None
1911 1912
def data(self):
    # raw file content supplied at construction time
    return self._data
def size(self):
    # derived from the in-memory data; there is no on-disk entry to stat
    return len(self.data())
def flags(self):
    # manifest flags ('l' symlink, 'x' executable) computed in __init__
    return self._flags
def renamed(self):
    # (source path, nullid) when this file carries copy info, else None
    return self._copied
1920 1921
def remove(self, ignoremissing=False):
    """wraps unlink for a repo's working directory"""
    # need to figure out what to do here
    # NOTE(review): 'ignoremissing' is currently unused here — confirm
    # whether deletion from the changectx should honor it
    del self._changectx[self._path]
1925 1926
1926 1927 def write(self, data, flags):
1927 1928 """wraps repo.wwrite"""
1928 1929 self._data = data
@@ -1,1198 +1,1198
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import struct
9 9
10 10 from node import nullid, nullrev, hex, bin
11 11 from i18n import _
12 12 from mercurial import obsolete
13 13 import error as errormod, util, filemerge, copies, subrepo, worker
14 14 import errno, os, shutil
15 15
16 16 _pack = struct.pack
17 17 _unpack = struct.unpack
18 18
19 19 def _droponode(data):
20 20 # used for compatibility for v1
21 21 bits = data.split('\0')
22 22 bits = bits[:-2] + bits[-1:]
23 23 return '\0'.join(bits)
24 24
class mergestate(object):
    '''track 3-way merge state of individual files

    it is stored on disk when needed. Two file are used, one with an old
    format, one with a new format. Both contains similar data, but the new
    format can store new kind of field.

    Current new format is a list of arbitrary record of the form:

        [type][length][content]

    Type is a single character, length is a 4 bytes integer, content is an
    arbitrary suites of bytes of length `length`.

    Type should be a letter. Capital letter are mandatory record, Mercurial
    should abort if they are unknown. lower case record can be safely ignored.

    Currently known record:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    '''
    # relative paths (under .hg/) of the two on-disk state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    def __init__(self, repo):
        self._repo = repo
        self._dirty = False
        self._read()

    def reset(self, node=None, other=None):
        # drop all in-memory state and the saved pre-merge file copies;
        # optionally seed the local/other nodes for a new merge
        self._state = {}
        self._local = None
        self._other = None
        if node:
            self._local = node
            self._other = other
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'F':
                # first field is the file path; the rest is kept verbatim
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # capital record types are mandatory: refuse to proceed
                raise util.Abort(_('unsupported merge state record: %s')
                                 % rtype)
        self._dirty = False

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                # v1 file is newer than v2 file, use it
                # we have to infer the "other" changeset of the merge
                # we cannot do better than that with v1 of the format
                mctx = self._repo[None].parents()[-1]
                v1records.append(('O', mctx.hex()))
                # add place holder "other" file node information
                # nobody is using it yet so we do no need to fetch the data
                # if mctx was wrong `mctx[bits[-2]]` may fails.
                for idx, r in enumerate(v1records):
                    if r[0] == 'F':
                        bits = r[1].split('\0')
                        bits.insert(-2, '')
                        v1records[idx] = (r[0], '\0'.join(bits))
                return v1records
        else:
            # no contradiction found: v2 is trustworthy and richer
            return v2records

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node, the rest are file entries;
                # [:-1] strips the trailing newline
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        returns list of record [(TYPE, data), ...]
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            # stream of [type:1 byte][length:4-byte big-endian][payload]
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = []
            records.append(('L', hex(self._local)))
            records.append(('O', hex(self._other)))
            for d, v in self._state.iteritems():
                records.append(('F', '\0'.join([d] + v)))
            self._writerecords(records)
            self._dirty = False

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = irecords.next()
        # commit() always emits the 'L' record first
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                # v1 has no "other file node" field, strip it
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file"""
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            # [type:1][length:4 big-endian][payload] — see _readrecordsv2
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        # preserve the local version under a content-addressed name so
        # resolve() can restore it before re-running the merge
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.vfs.write('merge/' + hash, fcl.data())
        # state layout matches resolve()'s unpacking: state, hash, lfile,
        # afile, anode, ofile, onode, flags
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # returns the single-char resolution state ('u' or 'r')
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        # all file paths with merge state, in arbitrary order
        return self._state.keys()

    def mark(self, dfile, state):
        # set the resolution state for `dfile` and flag for write-out
        self._state[dfile][0] = state
        self._dirty = True

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def resolve(self, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        if self[dfile] == 'r':
            return 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
                                   afile)
            elif flags == fla:
                # local side didn't change the flag: take the other side's
                flags = flo
        # restore local
        f = self._repo.vfs('merge/' + hash)
        self._repo.wwrite(dfile, f.read(), flags)
        f.close()
        # returns None for "no conflict", 0 for success, non-zero otherwise
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
                                labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')
        return r
299 299
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Report whether working-dir file `f` is untracked and differs from
    `mctx[f2]` (defaulting to the same path)."""
    if f2 is None:
        f2 = f
    # only a real, auditable file that the dirstate does not track counts
    if not os.path.isfile(repo.wjoin(f)):
        return False
    if not repo.wvfs.audit.check(f):
        return False
    if repo.dirstate.normalize(f) in repo.dirstate:
        return False
    return mctx[f2].cmp(wctx[f])
307 307
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicting = []
    if not force:
        # collect untracked working-dir files that differ from the incoming
        # version ('c'/'dc' compare the same path, 'dg' compares the source)
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc') and _checkunknownfile(repo, wctx, mctx, f):
                conflicting.append(f)
            elif m == 'dg' and _checkunknownfile(repo, wctx, mctx, f, args[0]):
                conflicting.append(f)

    for f in sorted(conflicting):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if conflicting:
        raise util.Abort(_("untracked files in working directory differ "
                           "from files in requested revision"))

    # rewrite provisional actions into their final form
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            if _checkunknownfile(repo, wctx, mctx, f):
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
341 341
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    # 'r' marks removal during a merge, 'f' just forgets the dirstate entry
    deletedaction = 'r' if branchmerge else 'f'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = deletedaction, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions
371 371
def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    merged = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in ('a', 'f', 'g', 'cd', 'dc'):
            for f, args, msg in actions[m]:
                merged.add(f)
        for f, args, msg in actions['r']:
            merged.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            merged.discard(f2)
            merged.add(f)
        for f, args, msg in actions['dg']:
            merged.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                merged.discard(f1)
            merged.add(f)

    # check case-folding collision in provisional merged manifest
    seen = {}
    for f in sorted(merged):
        fold = util.normcase(f)
        previous = seen.get(fold)
        if previous is not None:
            raise util.Abort(_("case-folding collision between %s and %s")
                             % (f, previous))
        seen[fold] = f
403 403
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns (actions, diverge, renamedelete) where actions maps each file
    to an (action-type, args, message) tuple.
    """

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(partial)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                # '+' marks the substate entry dirty so it always merges
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if partial and not partial(f):
            continue
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                # not in the ancestor: either both renamed from the same
                # source, or independently created on both sides
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # flags can only be merged cheaply when no symlink is involved
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2,), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1,), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', None, "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (fl2,), "prompt deleted/changed")

    return actions, diverge, renamedelete
547 547
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""

    # iterate over a snapshot since entries may be dropped along the way
    for f, (m, args, msg) in actions.items():
        if f not in ancestor:
            continue
        if m == 'cd' and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted
559 559
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    "Calculate the actions needed to merge mctx into wctx using ancestors"

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        # run manifestmerge once per ancestor and pick the best action
        # per file afterwards ("bid merge")
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)
            if diverge is None: # and renamedelete is None.
                # Arbitrarily pick warnings from first iteration
                diverge = diverge1
                renamedelete = renamedelete1
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note('  %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working-directory target: also forget files removed there
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
645 645
def batchremove(repo, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    ui = repo.ui
    verbose = ui.verbose
    # hoist lookups out of the loop; this can run over many files
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    count = 0
    for f, args, msg in actions:
        ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as err:
            ui.warn(_("update failed to remove %s: %s!\n") %
                    (f, err.strerror))
        # emit a progress tuple roughly every 100 files
        if count == 100:
            yield count, f
            count = 0
        count += 1
    if count > 0:
        yield count, f
672 672
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    ui = repo.ui
    verbose = ui.verbose
    # hoist lookups out of the loop; this can run over many files
    fctx = mctx.filectx
    wwrite = repo.wwrite
    count = 0
    for f, args, msg in actions:
        ui.debug(" %s: %s -> g\n" % (f, msg))
        if verbose:
            ui.note(_("getting %s\n") % f)
        wwrite(f, fctx(f).data(), args[0])
        # emit a progress tuple roughly every 100 files
        if count == 100:
            yield count, f
            count = 0
        count += 1
    if count > 0:
        yield count, f
695 695
696 696 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
697 697 """apply the merge action list to the working directory
698 698
699 699 wctx is the working copy context
700 700 mctx is the context to be merged into the working copy
701 701
702 702 Return a tuple of counts (updated, merged, removed, unresolved) that
703 703 describes how many files were affected by the update.
704 704 """
705 705
706 706 updated, merged, removed, unresolved = 0, 0, 0, 0
707 707 ms = mergestate(repo)
708 708 ms.reset(wctx.p1().node(), mctx.node())
709 709 moves = []
710 710 for m, l in actions.items():
711 711 l.sort()
712 712
713 713 # prescan for merges
714 714 for f, args, msg in actions['m']:
715 715 f1, f2, fa, move, anc = args
716 716 if f == '.hgsubstate': # merged internally
717 717 continue
718 718 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
719 719 fcl = wctx[f1]
720 720 fco = mctx[f2]
721 721 actx = repo[anc]
722 722 if fa in actx:
723 723 fca = actx[fa]
724 724 else:
725 725 fca = repo.filectx(f1, fileid=nullrev)
726 726 ms.add(fcl, fco, fca, f)
727 727 if f1 != f and move:
728 728 moves.append(f1)
729 729
730 730 audit = repo.wvfs.audit
731 731 _updating = _('updating')
732 732 _files = _('files')
733 733 progress = repo.ui.progress
734 734
735 735 # remove renamed files after safely stored
736 736 for f in moves:
737 737 if os.path.lexists(repo.wjoin(f)):
738 738 repo.ui.debug("removing %s\n" % f)
739 739 audit(f)
740 740 util.unlinkpath(repo.wjoin(f))
741 741
742 742 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
743 743
744 744 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
745 745 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
746 746
747 747 # remove in parallel (must come first)
748 748 z = 0
749 749 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
750 750 for i, item in prog:
751 751 z += i
752 752 progress(_updating, z, item=item, total=numupdates, unit=_files)
753 753 removed = len(actions['r'])
754 754
755 755 # get in parallel
756 756 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
757 757 for i, item in prog:
758 758 z += i
759 759 progress(_updating, z, item=item, total=numupdates, unit=_files)
760 760 updated = len(actions['g'])
761 761
762 762 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
763 763 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
764 764
765 765 # forget (manifest only, just log it) (must come first)
766 766 for f, args, msg in actions['f']:
767 767 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
768 768 z += 1
769 769 progress(_updating, z, item=f, total=numupdates, unit=_files)
770 770
771 771 # re-add (manifest only, just log it)
772 772 for f, args, msg in actions['a']:
773 773 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
774 774 z += 1
775 775 progress(_updating, z, item=f, total=numupdates, unit=_files)
776 776
777 777 # keep (noop, just log it)
778 778 for f, args, msg in actions['k']:
779 779 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
780 780 # no progress
781 781
782 782 # merge
783 783 for f, args, msg in actions['m']:
784 784 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
785 785 z += 1
786 786 progress(_updating, z, item=f, total=numupdates, unit=_files)
787 787 if f == '.hgsubstate': # subrepo states need updating
788 788 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
789 789 overwrite)
790 790 continue
791 791 audit(f)
792 792 r = ms.resolve(f, wctx, labels=labels)
793 793 if r is not None and r > 0:
794 794 unresolved += 1
795 795 else:
796 796 if r is None:
797 797 updated += 1
798 798 else:
799 799 merged += 1
800 800
801 801 # directory rename, move local
802 802 for f, args, msg in actions['dm']:
803 803 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
804 804 z += 1
805 805 progress(_updating, z, item=f, total=numupdates, unit=_files)
806 806 f0, flags = args
807 807 repo.ui.note(_("moving %s to %s\n") % (f0, f))
808 808 audit(f)
809 809 repo.wwrite(f, wctx.filectx(f0).data(), flags)
810 810 util.unlinkpath(repo.wjoin(f0))
811 811 updated += 1
812 812
813 813 # local directory rename, get
814 814 for f, args, msg in actions['dg']:
815 815 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
816 816 z += 1
817 817 progress(_updating, z, item=f, total=numupdates, unit=_files)
818 818 f0, flags = args
819 819 repo.ui.note(_("getting %s to %s\n") % (f0, f))
820 820 repo.wwrite(f, mctx.filectx(f0).data(), flags)
821 821 updated += 1
822 822
823 823 # exec
824 824 for f, args, msg in actions['e']:
825 825 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
826 826 z += 1
827 827 progress(_updating, z, item=f, total=numupdates, unit=_files)
828 828 flags, = args
829 829 audit(f)
830 830 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
831 831 updated += 1
832 832
833 833 ms.commit()
834 834 progress(_updating, None, total=numupdates, unit=_files)
835 835
836 836 return updated, merged, removed, unresolved
837 837
838 838 def recordupdates(repo, actions, branchmerge):
839 839 "record merge actions to the dirstate"
840 840 # remove (must come first)
841 841 for f, args, msg in actions['r']:
842 842 if branchmerge:
843 843 repo.dirstate.remove(f)
844 844 else:
845 845 repo.dirstate.drop(f)
846 846
847 847 # forget (must come first)
848 848 for f, args, msg in actions['f']:
849 849 repo.dirstate.drop(f)
850 850
851 851 # re-add
852 852 for f, args, msg in actions['a']:
853 853 if not branchmerge:
854 854 repo.dirstate.add(f)
855 855
856 856 # exec change
857 857 for f, args, msg in actions['e']:
858 858 repo.dirstate.normallookup(f)
859 859
860 860 # keep
861 861 for f, args, msg in actions['k']:
862 862 pass
863 863
864 864 # get
865 865 for f, args, msg in actions['g']:
866 866 if branchmerge:
867 867 repo.dirstate.otherparent(f)
868 868 else:
869 869 repo.dirstate.normal(f)
870 870
871 871 # merge
872 872 for f, args, msg in actions['m']:
873 873 f1, f2, fa, move, anc = args
874 874 if branchmerge:
875 875 # We've done a branch merge, mark this file as merged
876 876 # so that we properly record the merger later
877 877 repo.dirstate.merge(f)
878 878 if f1 != f2: # copy/rename
879 879 if move:
880 880 repo.dirstate.remove(f1)
881 881 if f1 != f:
882 882 repo.dirstate.copy(f1, f)
883 883 else:
884 884 repo.dirstate.copy(f2, f)
885 885 else:
886 886 # We've update-merged a locally modified file, so
887 887 # we set the dirstate to emulate a normal checkout
888 888 # of that file some time in the past. Thus our
889 889 # merge will appear as a normal local file
890 890 # modification.
891 891 if f2 == f: # file not locally copied/moved
892 892 repo.dirstate.normallookup(f)
893 893 if move:
894 894 repo.dirstate.drop(f1)
895 895
896 896 # directory rename, move local
897 897 for f, args, msg in actions['dm']:
898 898 f0, flag = args
899 899 if branchmerge:
900 900 repo.dirstate.add(f)
901 901 repo.dirstate.remove(f0)
902 902 repo.dirstate.copy(f0, f)
903 903 else:
904 904 repo.dirstate.normal(f)
905 905 repo.dirstate.drop(f0)
906 906
907 907 # directory rename, get
908 908 for f, args, msg in actions['dg']:
909 909 f0, flag = args
910 910 if branchmerge:
911 911 repo.dirstate.add(f)
912 912 repo.dirstate.copy(f0, f)
913 913 else:
914 914 repo.dirstate.normal(f)
915 915
916 916 def update(repo, node, branchmerge, force, partial, ancestor=None,
917 917 mergeancestor=False, labels=None):
918 918 """
919 919 Perform a merge between the working directory and the given node
920 920
921 921 node = the node to update to, or None if unspecified
922 922 branchmerge = whether to merge between branches
923 923 force = whether to force branch merging or file overwriting
924 924 partial = a function to filter file lists (dirstate not updated)
925 925 mergeancestor = whether it is merging with an ancestor. If true,
926 926 we should accept the incoming changes for any prompts that occur.
927 927 If false, merging with an ancestor (fast-forward) is only allowed
928 928 between different named branches. This flag is used by rebase extension
929 929 as a temporary fix and should be avoided in general.
930 930
931 931 The table below shows all the behaviors of the update command
932 932 given the -c and -C or no options, whether the working directory
933 933 is dirty, whether a revision is specified, and the relationship of
934 934 the parent rev to the target rev (linear, on the same named
935 935 branch, or on another named branch).
936 936
937 937 This logic is tested by test-update-branches.t.
938 938
939 939 -c -C dirty rev | linear same cross
940 940 n n n n | ok (1) x
941 941 n n n y | ok ok ok
942 942 n n y n | merge (2) (2)
943 943 n n y y | merge (3) (3)
944 944 n y * * | --- discard ---
945 945 y n y * | --- (4) ---
946 946 y n n * | --- ok ---
947 947 y y * * | --- (5) ---
948 948
949 949 x = can't happen
950 950 * = don't-care
951 951 1 = abort: not a linear update (merge or update --check to force update)
952 952 2 = abort: uncommitted changes (commit and merge, or update --clean to
953 953 discard changes)
954 954 3 = abort: uncommitted changes (commit or update --clean to discard changes)
955 955 4 = abort: uncommitted changes (checked in commands.py)
956 956 5 = incompatible options (checked in commands.py)
957 957
958 958 Return the same tuple as applyupdates().
959 959 """
960 960
961 961 onode = node
962 962 wlock = repo.wlock()
963 963 try:
964 964 wc = repo[None]
965 965 pl = wc.parents()
966 966 p1 = pl[0]
967 967 pas = [None]
968 968 if ancestor is not None:
969 969 pas = [repo[ancestor]]
970 970
971 971 if node is None:
972 972 # Here is where we should consider bookmarks, divergent bookmarks,
973 973 # foreground changesets (successors), and tip of current branch;
974 974 # but currently we are only checking the branch tips.
975 975 try:
976 976 node = repo.branchtip(wc.branch())
977 977 except errormod.RepoLookupError:
978 978 if wc.branch() == 'default': # no default branch!
979 979 node = repo.lookup('tip') # update to tip
980 980 else:
981 981 raise util.Abort(_("branch %s not found") % wc.branch())
982 982
983 983 if p1.obsolete() and not p1.children():
984 984 # allow updating to successors
985 985 successors = obsolete.successorssets(repo, p1.node())
986 986
987 987 # behavior of certain cases is as follows,
988 988 #
989 989 # divergent changesets: update to highest rev, similar to what
990 990 # is currently done when there are more than one head
991 991 # (i.e. 'tip')
992 992 #
993 993 # replaced changesets: same as divergent except we know there
994 994 # is no conflict
995 995 #
996 996 # pruned changeset: no update is done; though, we could
997 997 # consider updating to the first non-obsolete parent,
998 998 # similar to what is current done for 'hg prune'
999 999
1000 1000 if successors:
1001 1001 # flatten the list here handles both divergent (len > 1)
1002 1002 # and the usual case (len = 1)
1003 1003 successors = [n for sub in successors for n in sub]
1004 1004
1005 1005 # get the max revision for the given successors set,
1006 1006 # i.e. the 'tip' of a set
1007 1007 node = repo.revs('max(%ln)', successors).first()
1008 1008 pas = [p1]
1009 1009
1010 1010 overwrite = force and not branchmerge
1011 1011
1012 1012 p2 = repo[node]
1013 1013 if pas[0] is None:
1014 if repo.ui.config('merge', 'preferancestor', '*') == '*':
1014 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1015 1015 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1016 1016 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1017 1017 else:
1018 1018 pas = [p1.ancestor(p2, warn=branchmerge)]
1019 1019
1020 1020 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1021 1021
1022 1022 ### check phase
1023 1023 if not overwrite and len(pl) > 1:
1024 1024 raise util.Abort(_("outstanding uncommitted merge"))
1025 1025 if branchmerge:
1026 1026 if pas == [p2]:
1027 1027 raise util.Abort(_("merging with a working directory ancestor"
1028 1028 " has no effect"))
1029 1029 elif pas == [p1]:
1030 1030 if not mergeancestor and p1.branch() == p2.branch():
1031 1031 raise util.Abort(_("nothing to merge"),
1032 1032 hint=_("use 'hg update' "
1033 1033 "or check 'hg heads'"))
1034 1034 if not force and (wc.files() or wc.deleted()):
1035 1035 raise util.Abort(_("uncommitted changes"),
1036 1036 hint=_("use 'hg status' to list changes"))
1037 1037 for s in sorted(wc.substate):
1038 1038 wc.sub(s).bailifchanged()
1039 1039
1040 1040 elif not overwrite:
1041 1041 if p1 == p2: # no-op update
1042 1042 # call the hooks and exit early
1043 1043 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1044 1044 repo.hook('update', parent1=xp2, parent2='', error=0)
1045 1045 return 0, 0, 0, 0
1046 1046
1047 1047 if pas not in ([p1], [p2]): # nonlinear
1048 1048 dirty = wc.dirty(missing=True)
1049 1049 if dirty or onode is None:
1050 1050 # Branching is a bit strange to ensure we do the minimal
1051 1051 # amount of call to obsolete.background.
1052 1052 foreground = obsolete.foreground(repo, [p1.node()])
1053 1053 # note: the <node> variable contains a random identifier
1054 1054 if repo[node].node() in foreground:
1055 1055 pas = [p1] # allow updating to successors
1056 1056 elif dirty:
1057 1057 msg = _("uncommitted changes")
1058 1058 if onode is None:
1059 1059 hint = _("commit and merge, or update --clean to"
1060 1060 " discard changes")
1061 1061 else:
1062 1062 hint = _("commit or update --clean to discard"
1063 1063 " changes")
1064 1064 raise util.Abort(msg, hint=hint)
1065 1065 else: # node is none
1066 1066 msg = _("not a linear update")
1067 1067 hint = _("merge or update --check to force update")
1068 1068 raise util.Abort(msg, hint=hint)
1069 1069 else:
1070 1070 # Allow jumping branches if clean and specific rev given
1071 1071 pas = [p1]
1072 1072
1073 1073 # deprecated config: merge.followcopies
1074 1074 followcopies = False
1075 1075 if overwrite:
1076 1076 pas = [wc]
1077 1077 elif pas == [p2]: # backwards
1078 1078 pas = [wc.p1()]
1079 1079 elif not branchmerge and not wc.dirty(missing=True):
1080 1080 pass
1081 1081 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1082 1082 followcopies = True
1083 1083
1084 1084 ### calculate phase
1085 1085 actionbyfile, diverge, renamedelete = calculateupdates(
1086 1086 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1087 1087 followcopies)
1088 1088 # Convert to dictionary-of-lists format
1089 1089 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1090 1090 for f, (m, args, msg) in actionbyfile.iteritems():
1091 1091 if m not in actions:
1092 1092 actions[m] = []
1093 1093 actions[m].append((f, args, msg))
1094 1094
1095 1095 if not util.checkcase(repo.path):
1096 1096 # check collision between files only in p2 for clean update
1097 1097 if (not branchmerge and
1098 1098 (force or not wc.dirty(missing=True, branch=False))):
1099 1099 _checkcollision(repo, p2.manifest(), None)
1100 1100 else:
1101 1101 _checkcollision(repo, wc.manifest(), actions)
1102 1102
1103 1103 # Prompt and create actions. TODO: Move this towards resolve phase.
1104 1104 for f, args, msg in sorted(actions['cd']):
1105 1105 if repo.ui.promptchoice(
1106 1106 _("local changed %s which remote deleted\n"
1107 1107 "use (c)hanged version or (d)elete?"
1108 1108 "$$ &Changed $$ &Delete") % f, 0):
1109 1109 actions['r'].append((f, None, "prompt delete"))
1110 1110 else:
1111 1111 actions['a'].append((f, None, "prompt keep"))
1112 1112 del actions['cd'][:]
1113 1113
1114 1114 for f, args, msg in sorted(actions['dc']):
1115 1115 flags, = args
1116 1116 if repo.ui.promptchoice(
1117 1117 _("remote changed %s which local deleted\n"
1118 1118 "use (c)hanged version or leave (d)eleted?"
1119 1119 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1120 1120 actions['g'].append((f, (flags,), "prompt recreating"))
1121 1121 del actions['dc'][:]
1122 1122
1123 1123 ### apply phase
1124 1124 if not branchmerge: # just jump to the new rev
1125 1125 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1126 1126 if not partial:
1127 1127 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1128 1128 # note that we're in the middle of an update
1129 1129 repo.vfs.write('updatestate', p2.hex())
1130 1130
1131 1131 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1132 1132
1133 1133 # divergent renames
1134 1134 for f, fl in sorted(diverge.iteritems()):
1135 1135 repo.ui.warn(_("note: possible conflict - %s was renamed "
1136 1136 "multiple times to:\n") % f)
1137 1137 for nf in fl:
1138 1138 repo.ui.warn(" %s\n" % nf)
1139 1139
1140 1140 # rename and delete
1141 1141 for f, fl in sorted(renamedelete.iteritems()):
1142 1142 repo.ui.warn(_("note: possible conflict - %s was deleted "
1143 1143 "and renamed to:\n") % f)
1144 1144 for nf in fl:
1145 1145 repo.ui.warn(" %s\n" % nf)
1146 1146
1147 1147 if not partial:
1148 1148 repo.dirstate.beginparentchange()
1149 1149 repo.setparents(fp1, fp2)
1150 1150 recordupdates(repo, actions, branchmerge)
1151 1151 # update completed, clear state
1152 1152 util.unlink(repo.join('updatestate'))
1153 1153
1154 1154 if not branchmerge:
1155 1155 repo.dirstate.setbranch(p2.branch())
1156 1156 repo.dirstate.endparentchange()
1157 1157 finally:
1158 1158 wlock.release()
1159 1159
1160 1160 if not partial:
1161 1161 def updatehook(parent1=xp1, parent2=xp2, error=stats[3]):
1162 1162 repo.hook('update', parent1=parent1, parent2=parent2, error=error)
1163 1163 repo._afterlock(updatehook)
1164 1164 return stats
1165 1165
1166 1166 def graft(repo, ctx, pctx, labels):
1167 1167 """Do a graft-like merge.
1168 1168
1169 1169 This is a merge where the merge ancestor is chosen such that one
1170 1170 or more changesets are grafted onto the current changeset. In
1171 1171 addition to the merge, this fixes up the dirstate to include only
1172 1172 a single parent and tries to duplicate any renames/copies
1173 1173 appropriately.
1174 1174
1175 1175 ctx - changeset to rebase
1176 1176 pctx - merge base, usually ctx.p1()
1177 1177 labels - merge labels eg ['local', 'graft']
1178 1178
1179 1179 """
1180 1180 # If we're grafting a descendant onto an ancestor, be sure to pass
1181 1181 # mergeancestor=True to update. This does two things: 1) allows the merge if
1182 1182 # the destination is the same as the parent of the ctx (so we can use graft
1183 1183 # to copy commits), and 2) informs update that the incoming changes are
1184 1184 # newer than the destination so it doesn't prompt about "remote changed foo
1185 1185 # which local deleted".
1186 1186 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1187 1187
1188 1188 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1189 1189 mergeancestor=mergeancestor, labels=labels)
1190 1190
1191 1191 # drop the second merge parent
1192 1192 repo.dirstate.beginparentchange()
1193 1193 repo.setparents(repo['.'].node(), nullid)
1194 1194 repo.dirstate.write()
1195 1195 # fix up dirstate for copies and renames
1196 1196 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1197 1197 repo.dirstate.endparentchange()
1198 1198 return stats
General Comments 0
You need to be logged in to leave comments. Login now