##// END OF EJS Templates
py3: explicitly convert dict.keys() and dict.items() into a list...
Pulkit Goyal -
r34350:1a5abc45 default
parent child Browse files
Show More
@@ -1,867 +1,867 b''
1 1 # copies.py - copy detection for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import heapq
12 12 import os
13 13
14 14 from . import (
15 15 match as matchmod,
16 16 node,
17 17 pathutil,
18 18 phases,
19 19 scmutil,
20 20 util,
21 21 )
22 22
def _findlimit(repo, a, b):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendent
    of the other, in which case we can return the minimum revnum of a and b.
    None if no such revision exists.

    a and b are revision numbers; either may be None, which is taken to
    mean the working directory (represented by the pseudo-rev len(cl)).
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    working = len(cl) # pseudo rev for the working directory
    if a is None:
        a = working
    if b is None:
        b = working

    side = {a: -1, b: 1}
    # revisions are pushed negated: heapq is a min-heap, so popping yields
    # the highest (most recently added) revision first, i.e. we walk the
    # graph in reverse topological order
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = working

    while interesting:
        r = -heapq.heappop(visit)
        if r == working:
            # the working directory's parents come from the dirstate,
            # not from the changelog
            parents = [cl.rev(p) for p in repo.dirstate.parents()]
        else:
            parents = cl.parentrevs(r)
        for p in parents:
            if p < 0:
                continue
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    if not hascommonancestor:
        return None

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @  3 temporary amend commit for a1-amend
    # |
    # o  2 a1-amend
    # |
    # | o  1 a1
    # |/
    # o  0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendent of b or visa-versa.
    return min(limit, a, b)
107 107
108 108 def _chain(src, dst, a, b):
109 109 '''chain two sets of copies a->b'''
110 110 t = a.copy()
111 111 for k, v in b.iteritems():
112 112 if v in t:
113 113 # found a chain
114 114 if t[v] != k:
115 115 # file wasn't renamed back to itself
116 116 t[k] = t[v]
117 117 if v not in dst:
118 118 # chain was a rename, not a copy
119 119 del t[v]
120 120 if v in src:
121 121 # file is a copy of an existing file
122 122 t[k] = v
123 123
124 124 # remove criss-crossed copies
125 125 for k, v in t.items():
126 126 if k in src and v in dst:
127 127 del t[k]
128 128
129 129 return t
130 130
131 131 def _tracefile(fctx, am, limit=-1):
132 132 '''return file context that is the ancestor of fctx present in ancestor
133 133 manifest am, stopping after the first ancestor lower than limit'''
134 134
135 135 for f in fctx.ancestors():
136 136 if am.get(f.path(), None) == f.filenode():
137 137 return f
138 138 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
139 139 return None
140 140
141 141 def _dirstatecopies(d):
142 142 ds = d._repo.dirstate
143 143 c = ds.copies().copy()
144 for k in c.keys():
144 for k in list(c):
145 145 if ds[k] not in 'anm':
146 146 del c[k]
147 147 return c
148 148
149 149 def _computeforwardmissing(a, b, match=None):
150 150 """Computes which files are in b but not a.
151 151 This is its own function so extensions can easily wrap this call to see what
152 152 files _forwardcopies is about to process.
153 153 """
154 154 ma = a.manifest()
155 155 mb = b.manifest()
156 156 return mb.filesnotin(ma, match=match)
157 157
def _forwardcopies(a, b, match=None):
    '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''

    # check for working copy
    w = None
    if b.rev() is None:
        # trace against b's first parent; dirstate copies are merged back in
        # at the end
        w = b
        b = w.p1()
        if a == b:
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(w)

    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    limit = _findlimit(a._repo, a.rev(), b.rev())
    if limit is None:
        # no common ancestor: disable the limit optimization
        limit = -1
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests, since
    # it compares the entire manifests. We can optimize it in the common use
    # case of computing what copies are in a commit versus its parent (like
    # during a rebase or histedit). Note, we exclude merge commits from this
    # optimization, since the ctx.files() for a merge commit is not correct for
    # this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = scmutil.matchfiles(a._repo, b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    # share one ancestry context across all traced files to avoid repeated
    # DAG traversals
    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
    for f in missing:
        fctx = b[f]
        fctx._ancestrycontext = ancestrycontext
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            cm[f] = ofctx.path()

    # combine copies from dirstate if necessary
    if w is not None:
        cm = _chain(a, w, cm, _dirstatecopies(w))

    return cm
207 207
def _backwardrenames(a, b):
    '''find {src@b: dst@a} rename mapping where b is an ancestor of a

    This is the forward copy mapping b -> a, inverted, with plain copies
    (sources still present in a) filtered out.
    '''
    # copytracing disabled: report no renames at all
    if a._repo.ui.config('experimental', 'copytrace') == 'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename situations
    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
    # arbitrarily pick one of the renames.
    f = _forwardcopies(b, a)
    r = {}
    # sort for determinism: with 1:n renames the last key in sorted order wins
    for k, v in sorted(f.iteritems()):
        # remove copies
        if v in a:
            continue
        r[v] = k
    return r
223 223
def pathcopies(x, y, match=None):
    '''find {dst@y: src@x} copy mapping for directed compare'''
    if x == y or not x or not y:
        # identical or empty endpoints: nothing can have been copied
        return {}
    anc = y.ancestor(x)
    if anc == x:
        # y descends from x: plain forward tracing
        return _forwardcopies(x, y, match=match)
    if anc == y:
        # x descends from y: trace renames backwards
        return _backwardrenames(x, y)
    # divergent endpoints: go back to the common ancestor, then forward
    return _chain(x, y, _backwardrenames(x, anc),
                  _forwardcopies(anc, y, match=match))
235 235
236 236 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
237 237 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
238 238 and c2. This is its own function so extensions can easily wrap this call
239 239 to see what files mergecopies is about to process.
240 240
241 241 Even though c1 and c2 are not used in this function, they are useful in
242 242 other extensions for being able to read the file nodes of the changed files.
243 243
244 244 "baselabel" can be passed to help distinguish the multiple computations
245 245 done in the graft case.
246 246 """
247 247 u1 = sorted(addedinm1 - addedinm2)
248 248 u2 = sorted(addedinm2 - addedinm1)
249 249
250 250 header = " unmatched files in %s"
251 251 if baselabel:
252 252 header += ' (from %s)' % baselabel
253 253 if u1:
254 254 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
255 255 if u2:
256 256 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
257 257 return u1, u2
258 258
def _makegetfctx(ctx):
    """return a 'getfctx' function suitable for _checkcopies usage

    We have to re-setup the function building 'filectx' for each
    '_checkcopies' to ensure the linkrev adjustment is properly setup for
    each. Linkrev adjustment is important to avoid bug in rename
    detection. Moreover, having a proper '_ancestrycontext' setup ensures
    the performance impact of this adjustment is kept limited. Without it,
    each file could do a full dag traversal making the time complexity of
    the operation explode (see issue4537).

    This function exists here mostly to limit the impact on stable. Feel
    free to refactor on default.
    """
    rev = ctx.rev()
    repo = ctx._repo
    # compute (or reuse) one ancestry context shared by every filectx the
    # returned function builds
    ac = getattr(ctx, '_ancestrycontext', None)
    if ac is None:
        revs = [rev]
        if rev is None:
            # working directory: anchor the ancestry on its parents
            revs = [p.rev() for p in ctx.parents()]
        ac = repo.changelog.ancestors(revs, inclusive=True)
        ctx._ancestrycontext = ac
    def makectx(f, n):
        if n in node.wdirnodes: # in a working context?
            if ctx.rev() is None:
                return ctx.filectx(f)
            return repo[None][f]
        fctx = repo.filectx(f, fileid=n)
        # setup only needed for filectx not create from a changectx
        fctx._ancestrycontext = ac
        fctx._descendantrev = rev
        return fctx
    # memoize: the same (f, n) pair may be requested repeatedly per call site
    return util.lrucachefunc(makectx)
293 293
294 294 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
295 295 """combine partial copy paths"""
296 296 remainder = {}
297 297 for f in copyfrom:
298 298 if f in copyto:
299 299 finalcopy[copyto[f]] = copyfrom[f]
300 300 del copyto[f]
301 301 for f in incompletediverge:
302 302 assert f not in diverge
303 303 ic = incompletediverge[f]
304 304 if ic[0] in copyto:
305 305 diverge[f] = [copyto[ic[0]], ic[1]]
306 306 else:
307 307 remainder[f] = ic
308 308 return remainder
309 309
def mergecopies(repo, c1, c2, base):
    """
    The function calling different copytracing algorithms on the basis of config
    which find moves and copies between context c1 and c2 that are relevant for
    merging. 'base' will be used as the merge base.

    Copytracing is used in commands like rebase, merge, unshelve, etc to merge
    files that were moved/ copied in one merge parent and modified in another.
    For example:

    o          ---> 4 another commit
    |
    |   o      ---> 3 commit that modifies a.txt
    |  /
    o /        ---> 2 commit that moves a.txt to b.txt
    |/
    o          ---> 1 merge base

    If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if the user has copytrace disabled, we print the following
    message:

    ```other changed <file> which local deleted```

    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
    "dirmove".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source present in one context but not the other
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir renames.
    This is needed for handling changes to new files previously grafted into
    renamed directories.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}, {}

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return repo.dirstate.copies(), {}, {}, {}, {}

    copytracing = repo.ui.config('experimental', 'copytrace')

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept across a
    # rebase.
    if copytracing == 'off':
        return {}, {}, {}, {}, {}
    elif copytracing == 'heuristics':
        # Do full copytracing if only drafts are involved as that will be fast
        # enough and will also cover the copies which can be missed by
        # heuristics
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    else:
        # default: the exhaustive (and slower) algorithm
        return _fullcopytracing(repo, c1, c2, base)
379 379
def _isfullcopytraceable(repo, c1, base):
    """ Checks that if base, source and destination are all draft branches, if
    yes let's use the full copytrace algorithm for increased capabilities since
    it will be fast enough.
    """
    if c1.rev() is None:
        # working directory: judge by its first parent
        c1 = c1.p1()

    nonpublicphases = set([phases.draft, phases.secret])

    if c1.phase() not in nonpublicphases or base.phase() not in nonpublicphases:
        # anything public involved: heuristics stay in charge
        return False
    sourcecommitlimit = repo.ui.configint('experimental',
                                          'copytrace.sourcecommitlimit')
    commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
    return commits < sourcecommitlimit
396 396
def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
    the copies.

    Returns the same five dicts as mergecopies(): copy, movewithdir,
    diverge, renamedelete and dirmove.
    """
    # In certain scenarios (e.g. graft, update or rebase), base can be
    # overridden We still need to know a real common ancestor in this case We
    # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
    # can be multiple common ancestors, e.g. in case of bidmerge. Because our
    # caller may not know if the revision passed in lieu of the CA is a genuine
    # common ancestor or not without explicitly checking it, it's better to
    # determine that here.
    #
    # base.descendant(wc) and base.descendant(base) are False, work around that
    _c1 = c1.p1() if c1.rev() is None else c1
    _c2 = c2.p1() if c2.rev() is None else c2
    # an endpoint is "dirty" if it isn't a descendant of the merge base
    # if we have a dirty endpoint, we need to trigger graft logic, and also
    # keep track of which endpoint is dirty
    dirtyc1 = not (base == _c1 or base.descendant(_c1))
    dirtyc2 = not (base == _c2 or base.descendant(_c2))
    graft = dirtyc1 or dirtyc2
    tca = base
    if graft:
        tca = _c1.ancestor(_c2)

    limit = _findlimit(repo, c1.rev(), c2.rev())
    if limit is None:
        # no common ancestor, no copies
        return {}, {}, {}, {}, {}
    repo.ui.debug(" searching for copies back to rev %d\n" % limit)

    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    # gather data from _checkcopies:
    # - diverge = record all diverges in this dict
    # - copy = record all non-divergent copies in this dict
    # - fullcopy = record all copies in this dict
    # - incomplete = record non-divergent partial copies here
    # - incompletediverge = record divergent partial copies here
    diverge = {} # divergence data is shared
    incompletediverge = {}
    data1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
            }
    data2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
            }

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb)
    addedinm2 = m2.filesnotin(mb)
    bothnew = sorted(addedinm1 & addedinm2)
    if tca == base:
        # unmatched file from base
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
        u1u, u2u = u1r, u2r
    else:
        # unmatched file from base (DAG rotation in the graft case)
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
                                      baselabel='base')
        # unmatched file from topological common ancestors (no DAG rotation)
        # need to recompute this for directory move handling when grafting
        mta = tca.manifest()
        u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
                                      m2.filesnotin(mta),
                                      baselabel='topological common ancestor')

    for f in u1u:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)

    for f in u2u:
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)

    copy = dict(data1['copy'])
    copy.update(data2['copy'])
    fullcopy = dict(data1['fullcopy'])
    fullcopy.update(data2['fullcopy'])

    if dirtyc1:
        _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
                       incompletediverge)
    else:
        _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
                       incompletediverge)

    renamedelete = {}
    renamedeleteset = set()
    divergeset = set()
    # iterate over a snapshot: entries are deleted from 'diverge' during
    # this walk
    for of, fl in list(diverge.items()):
        if len(fl) == 1 or of in c1 or of in c2:
            del diverge[of] # not actually divergent, or not a rename
            if of not in c1 and of not in c2:
                # renamed on one side, deleted on the other side, but filter
                # out files that have been renamed and then deleted
                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
                renamedeleteset.update(fl) # reverse map for below
        else:
            divergeset.update(fl) # reverse map for below

    if bothnew:
        repo.ui.debug(" unmatched files new in both:\n %s\n"
                      % "\n ".join(bothnew))
    bothdiverge = {}
    bothincompletediverge = {}
    remainder = {}
    both1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
            }
    both2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
            }
    for f in bothnew:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
    if dirtyc1:
        # incomplete copies may only be found on the "dirty" side for bothnew
        assert not both2['incomplete']
        remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    elif dirtyc2:
        assert not both1['incomplete']
        remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    else:
        # incomplete copies and divergences can't happen outside grafts
        assert not both1['incomplete']
        assert not both2['incomplete']
        assert not bothincompletediverge
    for f in remainder:
        assert f not in bothdiverge
        ic = remainder[f]
        if ic[0] in (m1 if dirtyc1 else m2):
            # backed-out rename on one side, but watch out for deleted files
            bothdiverge[f] = ic
    for of, fl in bothdiverge.items():
        if len(fl) == 2 and fl[0] == fl[1]:
            copy[fl[0]] = of # not actually divergent, just matching renames

    if fullcopy and repo.ui.debugflag:
        repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in divergeset:
                note += "!"
            if f in renamedeleteset:
                note += "%"
            repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                            note))
    del divergeset

    if not fullcopy:
        return copy, {}, diverge, renamedelete, {}

    repo.ui.debug(" checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    # Hack for adding '', which is not otherwise added, to d1 and d2
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc + "/")
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc + "/")
        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
            # files from the same directory moved to two different places
            invalid.add(dsrc + "/")
        else:
            # looks good so far
            dirmove[dsrc + "/"] = ddst + "/"

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, {}, diverge, renamedelete, {}

    for d in dirmove:
        repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in u1r + u2r:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug((" pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete, dirmove
628 628
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If merge is involved it fallbacks to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    Returns the same five dicts as mergecopies(); only the first (copies)
    is ever populated by this algorithm.
    """

    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs('%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug("switching to full copytracing as base is not "
                      "an ancestor of c2\n")
        return _fullcopytracing(repo, c1, c2, base)

    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug("switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    cp = _forwardcopies(base, c2)
    for dst, src in cp.iteritems():
        if src in m1:
            copies[dst] = src

    # file is missing if it isn't present in the destination, but is present in
    # the base and present in the source.
    # Presence in the base is important to exclude added files, presence in the
    # source is important to exclude removed files.
    # Note: materialize the filter result in a list; on Python 3 a bare
    # filter object is lazy and always truthy, which would break the
    # emptiness check below.
    missingfiles = list(filter(lambda f: f not in m1 and f in base and f in c2,
                               changedfiles))

    if missingfiles:
        # index the files added on the c1 side by basename and by dirname,
        # so each missing file only has to be checked against plausible
        # rename candidates
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        # in case of a rebase/graft, base may not be a common ancestor
        anc = c1.ancestor(c2)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2, anc.rev()):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return copies, {}, {}, {}, {}
716 716
717 717 def _related(f1, f2, limit):
718 718 """return True if f1 and f2 filectx have a common ancestor
719 719
720 720 Walk back to common ancestor to see if the two files originate
721 721 from the same file. Since workingfilectx's rev() is None it messes
722 722 up the integer comparison logic, hence the pre-step check for
723 723 None (f1 and f2 can only be workingfilectx's initially).
724 724 """
725 725
726 726 if f1 == f2:
727 727 return f1 # a match
728 728
729 729 g1, g2 = f1.ancestors(), f2.ancestors()
730 730 try:
731 731 f1r, f2r = f1.linkrev(), f2.linkrev()
732 732
733 733 if f1r is None:
734 734 f1 = next(g1)
735 735 if f2r is None:
736 736 f2 = next(g2)
737 737
738 738 while True:
739 739 f1r, f2r = f1.linkrev(), f2.linkrev()
740 740 if f1r > f2r:
741 741 f1 = next(g1)
742 742 elif f2r > f1r:
743 743 f2 = next(g2)
744 744 elif f1 == f2:
745 745 return f1 # a match
746 746 elif f1r == f2r or f1r < limit or f2r < limit:
747 747 return False # copy no longer relevant
748 748 except StopIteration:
749 749 return False
750 750
def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
    """
    check possible copies of f from msrc to mdst

    srcctx = starting context for f in msrc
    dstctx = destination context for f in mdst
    f = the filename to check (as in msrc)
    base = the changectx used as a merge base
    tca = topological common ancestor for graft-like scenarios
    remotebase = True if base is outside tca::srcctx, False otherwise
    limit = the rev number to not search beyond
    data = dictionary of dictionary to store copy data. (see mergecopies)

    note: limit is only an optimization, and provides no guarantee that
    irrelevant revisions will not be visited
    there is no easy way to make this algorithm stop in a guaranteed way
    once it "goes behind a certain revision".
    """

    msrc = srcctx.manifest()
    mdst = dstctx.manifest()
    mb = base.manifest()
    mta = tca.manifest()
    # Might be true if this call is about finding backward renames,
    # This happens in the case of grafts because the DAG is then rotated.
    # If the file exists in both the base and the source, we are not looking
    # for a rename on the source side, but on the part of the DAG that is
    # traversed backwards.
    #
    # In the case there is both backward and forward renames (before and after
    # the base) this is more complicated as we must detect a divergence.
    # We use 'backwards = False' in that case.
    backwards = not remotebase and base != tca and f in mb
    getsrcfctx = _makegetfctx(srcctx)
    getdstfctx = _makegetfctx(dstctx)

    if msrc[f] == mb.get(f) and not remotebase:
        # Nothing to merge
        return

    of = None
    # 'seen' tracks every path already visited along the rename chain, so
    # a path revisited (rename cycle) is not processed twice
    seen = {f}
    for oc in getsrcfctx(f, msrc[f]).ancestors():
        ocr = oc.linkrev()
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if ocr < limit:
                break
            continue
        seen.add(of)

        # remember for dir rename detection
        if backwards:
            data['fullcopy'][of] = f # grafting backwards through renames
        else:
            data['fullcopy'][f] = of
        if of not in mdst:
            continue # no match, keep looking
        if mdst[of] == mb.get(of):
            return # no merge needed, quit early
        c2 = getdstfctx(of, mdst[of])
        # c2 might be a plain new file on added on destination side that is
        # unrelated to the droids we are looking for.
        cr = _related(oc, c2, tca.rev())
        if cr and (of == f or of == c2.path()): # non-divergent
            if backwards:
                data['copy'][of] = f
            elif of in mb:
                data['copy'][f] = of
            elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
                data['copy'][of] = f
                del data['fullcopy'][f]
                data['fullcopy'][of] = f
            else: # divergence w.r.t. graft CA on one side of topological CA
                for sf in seen:
                    if sf in mb:
                        assert sf not in data['diverge']
                        data['diverge'][sf] = [f, of]
                        break
            return

        if of in mta:
            if backwards or remotebase:
                # partial copy: the other half is resolved later by
                # _combinecopies
                data['incomplete'][of] = f
            else:
                for sf in seen:
                    if sf in mb:
                        if tca == base:
                            data['diverge'].setdefault(sf, []).append(f)
                        else:
                            data['incompletediverge'][sf] = [of, f]
                        return
844 844
def duplicatecopies(repo, rev, fromrev, skiprev=None):
    '''reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    '''
    exclude = {}
    if (skiprev is not None and
        repo.ui.config('experimental', 'copytrace') != 'off'):
        # copytrace='off' skips this line, but not the entire function because
        # the line below is O(size of the repo) during a rebase, while the rest
        # of the function is much faster (and is required for carrying copy
        # metadata across the rebase anyway).
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate
        if dst in exclude:
            continue
        # only record the copy when the destination is currently tracked
        # by the dirstate ('n', 'm' or 'a' state)
        if repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
@@ -1,1774 +1,1774 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullhex,
22 22 nullid,
23 23 nullrev,
24 24 )
25 25 from . import (
26 26 copies,
27 27 error,
28 28 filemerge,
29 29 match as matchmod,
30 30 obsutil,
31 31 pycompat,
32 32 scmutil,
33 33 subrepo,
34 34 util,
35 35 worker,
36 36 )
37 37
38 38 _pack = struct.pack
39 39 _unpack = struct.unpack
40 40
41 41 def _droponode(data):
42 42 # used for compatibility for v1
43 43 bits = data.split('\0')
44 44 bits = bits[:-2] + bits[-1:]
45 45 return '\0'.join(bits)
46 46
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    '''
    # on-disk paths (relative to .hg) of the v1 and v2 state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        """Forget all per-file state and start a fresh merge between
        `node` (local) and `other`, also wiping premerge backups on disk."""
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        # drop cached context properties left over from a previous merge
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # remove stale premerge backups under .hg/merge (True = ignore errors)
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDC':
                # per-file record: filename followed by the state fields
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                # optional extras: alternating key/value pairs for a file
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase record types are mandatory; collect unknown ones
                # so we can abort below instead of silently mis-reading state
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Return True if every v1 record is consistent with the v2 records
        (after dropping the v2-only "other node" field), i.e. both files were
        written by the same merge."""
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            # for/else: every v1 record was found among the v2 records
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    # first line is the local node; remaining lines are files
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    # 't' wraps the real record: first byte is the true type
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        """Changectx of the "local" side of the merge (lazily cached)."""
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        """Changectx of the "other" side of the merge (lazily cached)."""
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
           self._repo.vfs.exists(self.statepathv1) or \
           self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        """Serialize the in-memory state into a list of (TYPE, data)
        records ready to be written to disk."""
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        for d, v in self._state.iteritems():
            if v[0] == 'd':
                records.append(('D', '\0'.join([d] + v)))
            # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
            # older versions of Mercurial
            elif v[1] == nullhex or v[6] == nullhex:
                records.append(('C', '\0'.join([d] + v)))
            else:
                records.append(('F', '\0'.join([d] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                # v1 has no "other node" field, so drop it
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                # wrap unknown-to-old-clients types in an advisory 't' record
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd:  file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # first field of a state entry is the file's merge state letter
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        """Return the filenames tracked by this merge state."""
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the merge state letter of `dfile` (e.g. 'r' once resolved)."""
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        """Return the merge driver run state ('u', 'm' or 's')."""
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        """Return (creating if needed) the extras dict for `filename`."""
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            # already resolved or driver-resolved: nothing to do
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        """Return ctx[f], or an absentfilectx placeholder if hexnode marks
        the file as absent (nullhex)."""
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'
604 604
605 605 def _getcheckunknownconfig(repo, section, name):
606 606 config = repo.ui.config(section, name, default='abort')
607 607 valid = ['abort', 'ignore', 'warn']
608 608 if config not in valid:
609 609 validstr = ', '.join(["'" + v + "'" for v in valid])
610 610 raise error.ConfigError(_("%s.%s not valid "
611 611 "('%s' is none of %s)")
612 612 % (section, name, config, validstr))
613 613 return config
614 614
615 615 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
616 616 if f2 is None:
617 617 f2 = f
618 618 return (repo.wvfs.audit.check(f)
619 619 and repo.wvfs.isfileorlink(f)
620 620 and repo.dirstate.normalize(f) not in repo.dirstate
621 621 and mctx[f2].cmp(wctx[f]))
622 622
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            # route a conflict set to the abort or warn bucket per config
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        # split conflicts into ignored vs plain-unknown, since each can be
        # configured (merge.checkignored / merge.checkunknown) separately
        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    # NOTE(review): unreachable -- the branch above already
                    # matches config == 'abort' (see row (1) of the table);
                    # kept to mirror the intended behavior
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # downgrade remaining 'c' (create) actions to 'g' (get), recording
    # whether the on-disk file needs a backup first
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
702 702
703 703 def _forgetremoved(wctx, mctx, branchmerge):
704 704 """
705 705 Forget removed files
706 706
707 707 If we're jumping between revisions (as opposed to merging), and if
708 708 neither the working directory nor the target rev has the file,
709 709 then we need to remove it from the dirstate, to prevent the
710 710 dirstate from listing the file when it is no longer in the
711 711 manifest.
712 712
713 713 If we're merging, and the other revision has removed a file
714 714 that is not present in the working directory, we need to mark it
715 715 as removed.
716 716 """
717 717
718 718 actions = {}
719 719 m = 'f'
720 720 if branchmerge:
721 721 m = 'r'
722 722 for f in wctx.deleted():
723 723 if f not in mctx:
724 724 actions[f] = m, None, "forget deleted"
725 725
726 726 if not branchmerge:
727 727 for f in wctx.removed():
728 728 if f not in mctx:
729 729 actions[f] = 'f', None, "forget removed"
730 730
731 731 return actions
732 732
def _checkcollision(repo, wmf, actions):
    """Abort if the provisional post-merge manifest would contain two
    paths -- or a path and a directory -- differing only by case."""
    # start from the working manifest and apply the planned actions
    provisional = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for kind in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[kind]:
                provisional.add(f)
        for f, args, msg in actions['r']:
            provisional.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            provisional.discard(f2)
            provisional.add(f)
        for f, args, msg in actions['dg']:
            provisional.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(f)

    # two files whose case-folded names clash?
    foldmap = {}
    for f in provisional:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # a file clashing with a directory? walk folded names in sorted order
    # so each directory prefix is seen before anything underneath it
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
775 775
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # returning True means "continue with the merge"
    return True
781 781
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # returning True means "the conclude step succeeded"
    return True
787 787
788 788 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
789 789 acceptremote, followcopies, forcefulldiff=False):
790 790 """
791 791 Merge wctx and p2 with ancestor pa and generate merge action list
792 792
793 793 branchmerge and force are as passed in to update
794 794 matcher = matcher to filter file lists
795 795 acceptremote = accept the incoming changes without prompting
796 796 """
797 797 if matcher is not None and matcher.always():
798 798 matcher = None
799 799
800 800 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
801 801
802 802 # manifests fetched in order are going to be faster, so prime the caches
803 803 [x.manifest() for x in
804 804 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
805 805
806 806 if followcopies:
807 807 ret = copies.mergecopies(repo, wctx, p2, pa)
808 808 copy, movewithdir, diverge, renamedelete, dirmove = ret
809 809
810 810 boolbm = pycompat.bytestr(bool(branchmerge))
811 811 boolf = pycompat.bytestr(bool(force))
812 812 boolm = pycompat.bytestr(bool(matcher))
813 813 repo.ui.note(_("resolving manifests\n"))
814 814 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
815 815 % (boolbm, boolf, boolm))
816 816 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
817 817
818 818 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
819 819 copied = set(copy.values())
820 820 copied.update(movewithdir.values())
821 821
822 822 if '.hgsubstate' in m1:
823 823 # check whether sub state is modified
824 824 if any(wctx.sub(s).dirty() for s in wctx.substate):
825 825 m1['.hgsubstate'] = modifiednodeid
826 826
827 827 # Don't use m2-vs-ma optimization if:
828 828 # - ma is the same as m1 or m2, which we're just going to diff again later
829 829 # - The caller specifically asks for a full diff, which is useful during bid
830 830 # merge.
831 831 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
832 832 # Identify which files are relevant to the merge, so we can limit the
833 833 # total m1-vs-m2 diff to just those files. This has significant
834 834 # performance benefits in large repositories.
835 835 relevantfiles = set(ma.diff(m2).keys())
836 836
837 837 # For copied and moved files, we need to add the source file too.
838 838 for copykey, copyvalue in copy.iteritems():
839 839 if copyvalue in relevantfiles:
840 840 relevantfiles.add(copykey)
841 841 for movedirkey in movewithdir:
842 842 relevantfiles.add(movedirkey)
843 843 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
844 844 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
845 845
846 846 diff = m1.diff(m2, match=matcher)
847 847
848 848 if matcher is None:
849 849 matcher = matchmod.always('', '')
850 850
851 851 actions = {}
852 852 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
853 853 if n1 and n2: # file exists on both local and remote side
854 854 if f not in ma:
855 855 fa = copy.get(f, None)
856 856 if fa is not None:
857 857 actions[f] = ('m', (f, f, fa, False, pa.node()),
858 858 "both renamed from " + fa)
859 859 else:
860 860 actions[f] = ('m', (f, f, None, False, pa.node()),
861 861 "both created")
862 862 else:
863 863 a = ma[f]
864 864 fla = ma.flags(f)
865 865 nol = 'l' not in fl1 + fl2 + fla
866 866 if n2 == a and fl2 == fla:
867 867 actions[f] = ('k' , (), "remote unchanged")
868 868 elif n1 == a and fl1 == fla: # local unchanged - use remote
869 869 if n1 == n2: # optimization: keep local content
870 870 actions[f] = ('e', (fl2,), "update permissions")
871 871 else:
872 872 actions[f] = ('g', (fl2, False), "remote is newer")
873 873 elif nol and n2 == a: # remote only changed 'x'
874 874 actions[f] = ('e', (fl2,), "update permissions")
875 875 elif nol and n1 == a: # local only changed 'x'
876 876 actions[f] = ('g', (fl1, False), "remote is newer")
877 877 else: # both changed something
878 878 actions[f] = ('m', (f, f, f, False, pa.node()),
879 879 "versions differ")
880 880 elif n1: # file exists only on local side
881 881 if f in copied:
882 882 pass # we'll deal with it on m2 side
883 883 elif f in movewithdir: # directory rename, move local
884 884 f2 = movewithdir[f]
885 885 if f2 in m2:
886 886 actions[f2] = ('m', (f, f2, None, True, pa.node()),
887 887 "remote directory rename, both created")
888 888 else:
889 889 actions[f2] = ('dm', (f, fl1),
890 890 "remote directory rename - move from " + f)
891 891 elif f in copy:
892 892 f2 = copy[f]
893 893 actions[f] = ('m', (f, f2, f2, False, pa.node()),
894 894 "local copied/moved from " + f2)
895 895 elif f in ma: # clean, a different, no remote
896 896 if n1 != ma[f]:
897 897 if acceptremote:
898 898 actions[f] = ('r', None, "remote delete")
899 899 else:
900 900 actions[f] = ('cd', (f, None, f, False, pa.node()),
901 901 "prompt changed/deleted")
902 902 elif n1 == addednodeid:
903 903 # This extra 'a' is added by working copy manifest to mark
904 904 # the file as locally added. We should forget it instead of
905 905 # deleting it.
906 906 actions[f] = ('f', None, "remote deleted")
907 907 else:
908 908 actions[f] = ('r', None, "other deleted")
909 909 elif n2: # file exists only on remote side
910 910 if f in copied:
911 911 pass # we'll deal with it on m1 side
912 912 elif f in movewithdir:
913 913 f2 = movewithdir[f]
914 914 if f2 in m1:
915 915 actions[f2] = ('m', (f2, f, None, False, pa.node()),
916 916 "local directory rename, both created")
917 917 else:
918 918 actions[f2] = ('dg', (f, fl2),
919 919 "local directory rename - get from " + f)
920 920 elif f in copy:
921 921 f2 = copy[f]
922 922 if f2 in m2:
923 923 actions[f] = ('m', (f2, f, f2, False, pa.node()),
924 924 "remote copied from " + f2)
925 925 else:
926 926 actions[f] = ('m', (f2, f, f2, True, pa.node()),
927 927 "remote moved from " + f2)
928 928 elif f not in ma:
929 929 # local unknown, remote created: the logic is described by the
930 930 # following table:
931 931 #
932 932 # force branchmerge different | action
933 933 # n * * | create
934 934 # y n * | create
935 935 # y y n | create
936 936 # y y y | merge
937 937 #
938 938 # Checking whether the files are different is expensive, so we
939 939 # don't do that when we can avoid it.
940 940 if not force:
941 941 actions[f] = ('c', (fl2,), "remote created")
942 942 elif not branchmerge:
943 943 actions[f] = ('c', (fl2,), "remote created")
944 944 else:
945 945 actions[f] = ('cm', (fl2, pa.node()),
946 946 "remote created, get or merge")
947 947 elif n2 != ma[f]:
948 948 df = None
949 949 for d in dirmove:
950 950 if f.startswith(d):
951 951 # new file added in a directory that was moved
952 952 df = dirmove[d] + f[len(d):]
953 953 break
954 954 if df is not None and df in m1:
955 955 actions[df] = ('m', (df, f, f, False, pa.node()),
956 956 "local directory rename - respect move from " + f)
957 957 elif acceptremote:
958 958 actions[f] = ('c', (fl2,), "remote recreating")
959 959 else:
960 960 actions[f] = ('dc', (None, f, f, False, pa.node()),
961 961 "prompt deleted/changed")
962 962
963 963 return actions, diverge, renamedelete
964 964
965 965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 966 """Resolves false conflicts where the nodeid changed but the content
967 967 remained the same."""
968 968
969 969 for f, (m, args, msg) in actions.items():
970 970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 971 # local did change but ended up with same content
972 972 actions[f] = 'r', None, "prompt same"
973 973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 974 # remote did change but ended up with same content
975 975 del actions[f] # don't get = keep local deleted
976 976
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    Returns (actions, diverge, renamedelete) where 'actions' maps filename
    to an action tuple and diverge/renamedelete are warning dicts.  With a
    single ancestor this is a straight manifestmerge(); with several
    ancestors (merge.preferancestor=*) each ancestor "bids" actions and an
    auction picks the best bid per file.
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # FIX: the comparison was previously reversed (it kept the
            # *longest* renamedelete set), contradicting the comment above
            # and the diverge handling just before it.  Keep the shortest.
            if renamedelete is None or len(renamedelete1) < len(renamedelete):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                # dict views are not indexable on py3; materialize first
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete
1086 1086
def _getcwd():
    """Return the current working directory, or None if it no longer
    exists (e.g. it was removed during an update)."""
    try:
        return pycompat.getcwd()
    except OSError as err:
        # Only a missing directory is expected; anything else is a real
        # failure and must propagate.
        if err.errno != errno.ENOENT:
            raise
        return None
1094 1094
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates

    Each yielded tuple is (count, filename): roughly every 100 files a
    progress batch is emitted, plus a final partial batch.  Designed to run
    inside worker.worker() forks, hence the flush at the end.
    """
    verbose = repo.ui.verbose
    # Remember the starting cwd so we can warn if the removals delete it.
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            # ignoremissing: the file may already be gone (e.g. removed by
            # another worker or by the user); that is not an error here.
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1129 1129
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates

    Each yielded tuple is (count, filename), batched roughly every 100
    files like batchremove().  Designed to run inside worker.worker()
    forks, hence the flush at the end.
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # backgroundclosing lets file handles be closed on a separate thread,
    # which speeds up writing many files on some platforms.
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # Preserve the existing (possibly untracked) file as an
                # .orig backup before overwriting it.
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    # ENOENT: the file vanished between the check and the
                    # rename; nothing to back up.
                    if e.errno != errno.ENOENT:
                        raise
            wctx[f].clearunknown()
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1168 1168
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.

    'actions' is a dict mapping action method ('r', 'g', 'm', ...) to a
    list of (filename, args, msg) tuples.  Phases run in a fixed order:
    record merges in mergestate, remove, get, bookkeeping-only actions,
    directory renames, exec-bit changes, then premerge/merge resolution
    (optionally gated by a merge driver).
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    # Sort each action list in place for deterministic processing order.
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    # Register every merge in the mergestate before touching the working
    # directory, so an interrupted merge can be resumed.
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # Total for the progress bar; 'k' (keep) actions are no-ops and are
    # not counted.
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # remove in parallel (must come first)
    # z accumulates the number of files processed, for progress reporting.
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        wctx[f].audit()
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            # Each file needing a real merge gets a second progress step.
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1395 1395
def recordupdates(repo, actions, branchmerge):
    """record merge actions to the dirstate

    'actions' is the dict-of-lists produced by update(); 'branchmerge'
    selects between merge-style dirstate records (two parents) and plain
    checkout-style records.  Loop ordering is significant: removes and
    forgets must be recorded before re-adds and merges.
    """
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
1479 1479
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first. The
    -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like objects. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better suppport some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        # pas: the merge ancestor candidates ("pa's"); filled in below if
        # the caller did not provide one explicitly.
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1736 1736
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Perform a graft-style merge of ctx onto the working directory.

    The merge ancestor is chosen (pctx) so that one or more changesets
    are effectively grafted onto the current changeset. After the merge,
    the dirstate is collapsed to a single parent (unless keepparent is
    True and ctx has a second parent to retain) and any renames/copies
    introduced by the grafted changeset are duplicated appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any
    """
    # Passing mergeancestor=True to update when grafting a descendant
    # onto an ancestor does two things: 1) it allows the merge when the
    # destination equals ctx's parent (so graft can copy commits), and
    # 2) it tells update that the incoming changes are newer than the
    # destination, suppressing the "remote changed foo which local
    # deleted" prompt.
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    # Default to a single-parent result; keep the other parent only when
    # explicitly requested and ctx is a merge that includes pctx.
    secondparent = nullid
    ctxparents = ctx.parents()
    if keepparent and len(ctxparents) == 2 and pctx in ctxparents:
        ctxparents.remove(pctx)
        secondparent = ctxparents[0].node()

    with repo.dirstate.parentchange():
        # NOTE: repo['.'] is re-read here on purpose — update() above
        # moved the working-directory parent, so the earlier lookup is
        # stale by this point.
        repo.setparents(repo['.'].node(), secondparent)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
General Comments 0
You need to be logged in to leave comments. Login now