copies: avoid early return in _combine_changeset_copies...
marmoute
r46773:1fcfff09 default
@@ -1,1223 +1,1221 @@
1 1 # coding: utf8
2 2 # copies.py - copy detection for Mercurial
3 3 #
4 4 # Copyright 2008 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import collections
12 12 import os
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 nullid,
17 17 nullrev,
18 18 )
19 19
20 20 from . import (
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 util,
26 26 )
27 27
28 28
29 29 from .utils import stringutil
30 30
31 31 from .revlogutils import (
32 32 flagutil,
33 33 sidedata as sidedatamod,
34 34 )
35 35
36 36 rustmod = policy.importrust("copy_tracing")
37 37
38 38
39 39 def _filter(src, dst, t):
40 40 """filters out invalid copies after chaining"""
41 41
42 42 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
43 43 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
44 44 # in the following table (not including trivial cases). For example, case 2
45 45 # is where a file existed in 'src' and remained under that name in 'mid' and
46 46 # then was renamed between 'mid' and 'dst'.
47 47 #
48 48 # case src mid dst result
49 49 # 1 x y - -
50 50 # 2 x y y x->y
51 51 # 3 x y x -
52 52 # 4 x y z x->z
53 53 # 5 - x y -
54 54 # 6 x x y x->y
55 55 #
56 56 # _chain() takes care of chaining the copies in 'a' and 'b', but it
57 57 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
58 58 # between 5 and 6, so it includes all cases in its result.
59 59 # Cases 1, 3, and 5 are then removed by _filter().
60 60
61 61 for k, v in list(t.items()):
62 62 # remove copies from files that didn't exist
63 63 if v not in src:
64 64 del t[k]
65 65 # remove criss-crossed copies
66 66 elif k in src and v in dst:
67 67 del t[k]
68 68 # remove copies to files that were then removed
69 69 elif k not in dst:
70 70 del t[k]
71 71
72 72
73 73 def _chain(prefix, suffix):
74 74 """chain two sets of copies 'prefix' and 'suffix'"""
75 75 result = prefix.copy()
76 76 for key, value in pycompat.iteritems(suffix):
77 77 result[key] = prefix.get(value, value)
78 78 return result
79 79
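# A minimal sketch (hypothetical toy data; sets stand in for contexts, since
# _filter() only needs membership tests) of how _chain() and _filter()
# compose for cases 1 and 4 of the table above, chaining x->y (src to mid)
# with y->z (mid to dst):
#
#     chained = _chain({b'y': b'x'}, {b'z': b'y'})
#     # chained == {b'y': b'x', b'z': b'x'}
#     _filter(src={b'x'}, dst={b'z'}, t=chained)
#     # b'y' no longer exists in dst (case 1) and is dropped; the x->z
#     # rename survives (case 4), so chained == {b'z': b'x'}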
80 80
81 81 def _tracefile(fctx, am, basemf):
82 82 """return file context that is the ancestor of fctx present in ancestor
83 83 manifest am
84 84
85 85 Note: we used to try to stop after a given limit, however checking if that
86 86 limit is reached turned out to be very expensive. We are better off
87 87 disabling that feature."""
88 88
89 89 for f in fctx.ancestors():
90 90 path = f.path()
91 91 if am.get(path, None) == f.filenode():
92 92 return path
93 93 if basemf and basemf.get(path, None) == f.filenode():
94 94 return path
95 95
96 96
97 97 def _dirstatecopies(repo, match=None):
98 98 ds = repo.dirstate
99 99 c = ds.copies().copy()
100 100 for k in list(c):
101 101 if ds[k] not in b'anm' or (match and not match(k)):
102 102 del c[k]
103 103 return c
104 104
105 105
106 106 def _computeforwardmissing(a, b, match=None):
107 107 """Computes which files are in b but not a.
108 108 This is its own function so extensions can easily wrap this call to see what
109 109 files _forwardcopies is about to process.
110 110 """
111 111 ma = a.manifest()
112 112 mb = b.manifest()
113 113 return mb.filesnotin(ma, match=match)
114 114
115 115
116 116 def usechangesetcentricalgo(repo):
117 117 """Checks if we should use changeset-centric copy algorithms"""
118 118 if repo.filecopiesmode == b'changeset-sidedata':
119 119 return True
120 120 readfrom = repo.ui.config(b'experimental', b'copies.read-from')
121 121 changesetsource = (b'changeset-only', b'compatibility')
122 122 return readfrom in changesetsource
123 123
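# Sketch of the configuration read above (hgrc syntax; the two values shown
# are exactly the ones accepted via `changesetsource`):
#
#     [experimental]
#     copies.read-from = changeset-only
#     # or: copies.read-from = compatibility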
124 124
125 125 def _committedforwardcopies(a, b, base, match):
126 126 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
127 127 # files might have to be traced back to the fctx parent of the last
128 128 # one-side-only changeset, but not further back than that
129 129 repo = a._repo
130 130
131 131 if usechangesetcentricalgo(repo):
132 132 return _changesetforwardcopies(a, b, match)
133 133
134 134 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
135 135 dbg = repo.ui.debug
136 136 if debug:
137 137 dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
138 138 am = a.manifest()
139 139 basemf = None if base is None else base.manifest()
140 140
141 141 # find where new files came from
142 142 # we currently don't try to find where old files went, too expensive
143 143 # this means we can miss a case like 'hg rm b; hg cp a b'
144 144 cm = {}
145 145
146 146 # Computing the forward missing is quite expensive on large manifests, since
147 147 # it compares the entire manifests. We can optimize it in the common use
148 148 # case of computing what copies are in a commit versus its parent (like
149 149 # during a rebase or histedit). Note, we exclude merge commits from this
150 150 # optimization, since the ctx.files() for a merge commit is not correct for
151 151 # this comparison.
152 152 forwardmissingmatch = match
153 153 if b.p1() == a and b.p2().node() == nullid:
154 154 filesmatcher = matchmod.exact(b.files())
155 155 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
156 156 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
157 157
158 158 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
159 159
160 160 if debug:
161 161 dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
162 162
163 163 for f in sorted(missing):
164 164 if debug:
165 165 dbg(b'debug.copies: tracing file: %s\n' % f)
166 166 fctx = b[f]
167 167 fctx._ancestrycontext = ancestrycontext
168 168
169 169 if debug:
170 170 start = util.timer()
171 171 opath = _tracefile(fctx, am, basemf)
172 172 if opath:
173 173 if debug:
174 174 dbg(b'debug.copies: rename of: %s\n' % opath)
175 175 cm[f] = opath
176 176 if debug:
177 177 dbg(
178 178 b'debug.copies: time: %f seconds\n'
179 179 % (util.timer() - start)
180 180 )
181 181 return cm
182 182
183 183
184 184 def _revinfo_getter(repo, match):
185 185 """returns a function that returns the following data given a <rev>:
186 186
187 187 * p1: revision number of first parent
188 188 * p2: revision number of second parent
189 189 * changes: a ChangingFiles object
190 190 """
191 191 cl = repo.changelog
192 192 parents = cl.parentrevs
193 193 flags = cl.flags
194 194
195 195 HASCOPIESINFO = flagutil.REVIDX_HASCOPIESINFO
196 196
197 197 changelogrevision = cl.changelogrevision
198 198
199 199 alwaysmatch = match.always()
200 200
201 201 if rustmod is not None and alwaysmatch:
202 202
203 203 def revinfo(rev):
204 204 p1, p2 = parents(rev)
205 205 if flags(rev) & HASCOPIESINFO:
206 206 raw = changelogrevision(rev)._sidedata.get(sidedatamod.SD_FILES)
207 207 else:
208 208 raw = None
209 209 return (p1, p2, raw)
210 210
211 211 else:
212 212
213 213 def revinfo(rev):
214 214 p1, p2 = parents(rev)
215 215 if flags(rev) & HASCOPIESINFO:
216 216 changes = changelogrevision(rev).changes
217 217 else:
218 218 changes = None
219 219 return (p1, p2, changes)
220 220
221 221 return revinfo
222 222
223 223
224 224 def cached_is_ancestor(is_ancestor):
225 225 """return a cached version of is_ancestor"""
226 226 cache = {}
227 227
228 228 def _is_ancestor(anc, desc):
229 229 if anc > desc:
230 230 return False
231 231 elif anc == desc:
232 232 return True
233 233 key = (anc, desc)
234 234 ret = cache.get(key)
235 235 if ret is None:
236 236 ret = cache[key] = is_ancestor(anc, desc)
237 237 return ret
238 238
239 239 return _is_ancestor
240 240
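# Hypothetical usage sketch: wrapping the changelog's isancestorrev so that
# repeated (anc, desc) queries are answered from the dict cache instead of
# walking the graph again:
#
#     is_ancestor = cached_is_ancestor(repo.changelog.isancestorrev)
#     is_ancestor(2, 10)   # computed once, then cached
#     is_ancestor(2, 10)   # served from the cache
#     is_ancestor(10, 2)   # False immediately: anc > desc needs no lookup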
241 241
242 242 def _changesetforwardcopies(a, b, match):
243 243 if a.rev() in (nullrev, b.rev()):
244 244 return {}
245 245
246 246 repo = a.repo().unfiltered()
247 247 children = {}
248 248
249 249 cl = repo.changelog
250 250 isancestor = cl.isancestorrev
251 251
252 252 # To track a rename from "A" to "B", we need to gather all parent → children
253 253 # edges that are contained in `::B` but not in `::A`.
254 254 #
255 255 #
256 256 # To do so, we need to gather all revisions exclusive¹ to "B" (ie¹: `::b -
257 257 # ::a`) and also all the "root points", ie the parents of the exclusive set
258 258 # that belong to ::a. These are exactly all the revisions needed to express
259 259 # the parent → children edges we need to combine.
260 260 #
261 261 # [1] actually, we need to gather all the edges within `(::a)::b`, ie:
262 262 # excluding paths that lead to roots that are not ancestors of `a`. We
263 263 # keep this out of the explanation because it is hard enough without this special case.
264 264
265 265 parents = cl._uncheckedparentrevs
266 266 graph_roots = (nullrev, nullrev)
267 267
268 268 ancestors = cl.ancestors([a.rev()], inclusive=True)
269 269 revs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
270 270 roots = set()
271 271 has_graph_roots = False
272 272
273 273 # iterate over `only(B, A)`
274 274 for r in revs:
275 275 ps = parents(r)
276 276 if ps == graph_roots:
277 277 has_graph_roots = True
278 278 else:
279 279 p1, p2 = ps
280 280
281 281 # find all the "root points" (see larger comment above)
282 282 if p1 != nullrev and p1 in ancestors:
283 283 roots.add(p1)
284 284 if p2 != nullrev and p2 in ancestors:
285 285 roots.add(p2)
286 286 if not roots:
287 287 # no common revision to track copies from
288 288 return {}
289 289 if has_graph_roots:
290 290 # this deals with the special case mentioned in the [1] footnote. We
291 291 # must filter out revisions that lead to non-common graph roots.
292 292 roots = list(roots)
293 293 m = min(roots)
294 294 h = [b.rev()]
295 295 roots_to_head = cl.reachableroots(m, h, roots, includepath=True)
296 296 roots_to_head = set(roots_to_head)
297 297 revs = [r for r in revs if r in roots_to_head]
298 298
299 299 if repo.filecopiesmode == b'changeset-sidedata':
300 300 # When using side-data, we will process the edges "from" the children.
301 301 # We iterate over the children, gathering previously collected data for
302 302 # the parents. To know when the parents' data is no longer necessary, we
303 303 # keep a counter of how many children each revision has.
304 304 #
305 305 # An interesting property of `children_count` is that it only contains
306 306 # revisions that will be relevant for an edge of the graph. So if a
307 307 # child has a parent not in `children_count`, that edge should not be
308 308 # processed.
309 309 children_count = dict((r, 0) for r in roots)
310 310 for r in revs:
311 311 for p in cl.parentrevs(r):
312 312 if p == nullrev:
313 313 continue
314 314 children_count[r] = 0
315 315 if p in children_count:
316 316 children_count[p] += 1
317 317 revinfo = _revinfo_getter(repo, match)
318 318 return _combine_changeset_copies(
319 319 revs, children_count, b.rev(), revinfo, match, isancestor
320 320 )
321 321 else:
322 322 # When not using side-data, we will process the edges "from" the parent.
323 323 # so we need a full mapping of the parent -> children relation.
324 324 children = dict((r, []) for r in roots)
325 325 for r in revs:
326 326 for p in cl.parentrevs(r):
327 327 if p == nullrev:
328 328 continue
329 329 children[r] = []
330 330 if p in children:
331 331 children[p].append(r)
332 332 x = revs.pop()
333 333 assert x == b.rev()
334 334 revs.extend(roots)
335 335 revs.sort()
336 336
337 337 revinfo = _revinfo_getter_extra(repo)
338 338 return _combine_changeset_copies_extra(
339 339 revs, children, b.rev(), revinfo, match, isancestor
340 340 )
341 341
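# A toy illustration (hypothetical revision numbers) of the sets computed by
# _changesetforwardcopies() when tracing copies from a=1 to b=4 in:
#
#     0 --- 1(a) --- 2 --- 4(b)
#             \           /
#              --- 3 -----
#
#     revs  = only(4, 1) = {2, 3, 4}   # revisions exclusive to b
#     roots = {1}                      # parents of the exclusive set in ::a
#
# The parent -> child edges needed to propagate copy data toward b are then
# covered by the children_count (sidedata) or children (extra) bookkeeping.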
342 342
343 343 def _combine_changeset_copies(
344 344 revs, children_count, targetrev, revinfo, match, isancestor
345 345 ):
346 346 """combine the copies information for each item of iterrevs
347 347
348 348 revs: sorted iterable of revisions to visit
349 349 children_count: a {parent: <number-of-relevant-children>} mapping.
350 350 targetrev: the final copies destination revision (not in iterrevs)
351 351 revinfo(rev): a function that returns (p1, p2, changes) for a revision
352 352 match: a matcher
353 353
354 354 It returns the aggregated copies information for `targetrev`.
355 355 """
356 356
357 357 alwaysmatch = match.always()
358 358
359 359 if rustmod is not None and alwaysmatch:
360 return rustmod.combine_changeset_copies(
360 final_copies = rustmod.combine_changeset_copies(
361 361 list(revs), children_count, targetrev, revinfo, isancestor
362 362 )
363
364 isancestor = cached_is_ancestor(isancestor)
365
366 all_copies = {}
367 # iterate over all the "children" side of copy tracing "edge"
368 for current_rev in revs:
369 p1, p2, changes = revinfo(current_rev)
370 current_copies = None
363 else:
364 isancestor = cached_is_ancestor(isancestor)
371 365
372 # iterate over all parents to chain the existing data with the
373 # data from the parent → child edge.
374 for parent, parent_rev in ((1, p1), (2, p2)):
375 if parent_rev == nullrev:
376 continue
377 remaining_children = children_count.get(parent_rev)
378 if remaining_children is None:
379 continue
380 remaining_children -= 1
381 children_count[parent_rev] = remaining_children
382 if remaining_children:
383 copies = all_copies.get(parent_rev, None)
384 else:
385 copies = all_copies.pop(parent_rev, None)
366 all_copies = {}
367 # iterate over all the "children" side of copy tracing "edge"
368 for current_rev in revs:
369 p1, p2, changes = revinfo(current_rev)
370 current_copies = None
371 # iterate over all parents to chain the existing data with the
372 # data from the parent → child edge.
373 for parent, parent_rev in ((1, p1), (2, p2)):
374 if parent_rev == nullrev:
375 continue
376 remaining_children = children_count.get(parent_rev)
377 if remaining_children is None:
378 continue
379 remaining_children -= 1
380 children_count[parent_rev] = remaining_children
381 if remaining_children:
382 copies = all_copies.get(parent_rev, None)
383 else:
384 copies = all_copies.pop(parent_rev, None)
386 385
387 if copies is None:
388 # this is a root
389 copies = {}
386 if copies is None:
387 # this is a root
388 copies = {}
390 389
391 newcopies = copies
392 # chain the data in the edge with the existing data
393 if changes is not None:
394 childcopies = {}
395 if parent == 1:
396 childcopies = changes.copied_from_p1
397 elif parent == 2:
398 childcopies = changes.copied_from_p2
390 newcopies = copies
391 # chain the data in the edge with the existing data
392 if changes is not None:
393 childcopies = {}
394 if parent == 1:
395 childcopies = changes.copied_from_p1
396 elif parent == 2:
397 childcopies = changes.copied_from_p2
399 398
400 if not alwaysmatch:
401 childcopies = {
402 dst: src
403 for dst, src in childcopies.items()
404 if match(dst)
405 }
406 if childcopies:
407 newcopies = copies.copy()
408 for dest, source in pycompat.iteritems(childcopies):
409 prev = copies.get(source)
410 if prev is not None and prev[1] is not None:
411 source = prev[1]
412 newcopies[dest] = (current_rev, source)
413 assert newcopies is not copies
414 if changes.removed:
415 if newcopies is copies:
399 if not alwaysmatch:
400 childcopies = {
401 dst: src
402 for dst, src in childcopies.items()
403 if match(dst)
404 }
405 if childcopies:
416 406 newcopies = copies.copy()
417 for f in changes.removed:
418 if f in newcopies:
419 if newcopies is copies:
420 # copy on write to avoid affecting potential other
421 # branches. when there are no other branches, this
422 # could be avoided.
423 newcopies = copies.copy()
424 newcopies[f] = (current_rev, None)
407 for dest, source in pycompat.iteritems(childcopies):
408 prev = copies.get(source)
409 if prev is not None and prev[1] is not None:
410 source = prev[1]
411 newcopies[dest] = (current_rev, source)
412 assert newcopies is not copies
413 if changes.removed:
414 if newcopies is copies:
415 newcopies = copies.copy()
416 for f in changes.removed:
417 if f in newcopies:
418 if newcopies is copies:
419 # copy on write to avoid affecting potential other
420 # branches. when there are no other branches, this
421 # could be avoided.
422 newcopies = copies.copy()
423 newcopies[f] = (current_rev, None)
424 # check potential need to combine the data from another parent (for
425 # that child). See comment below for details.
426 if current_copies is None:
427 current_copies = newcopies
428 elif current_copies is newcopies:
429 # nothing to merge:
430 pass
431 else:
432 # we are the second parent to work on c, so we need to merge our
433 # work with the other.
434 #
435 # In case of conflict, parent 1 takes precedence over parent 2.
436 # This is an arbitrary choice made anew when implementing
437 # changeset based copies. It was made without regard to
438 # potential filelog-related behavior.
439 assert parent == 2
440 current_copies = _merge_copies_dict(
441 newcopies, current_copies, isancestor, changes
442 )
443 all_copies[current_rev] = current_copies
425 444
426 # check potential need to combine the data from another parent (for
427 # that child). See comment below for details.
428 if current_copies is None:
429 current_copies = newcopies
430 elif current_copies is newcopies:
431 # nothing to merge:
432 pass
433 else:
434 # we are the second parent to work on c, so we need to merge our
435 # work with the other.
436 #
437 # In case of conflict, parent 1 takes precedence over parent 2.
438 # This is an arbitrary choice made anew when implementing
439 # changeset based copies. It was made without regard to
440 # potential filelog-related behavior.
441 assert parent == 2
442 current_copies = _merge_copies_dict(
443 newcopies, current_copies, isancestor, changes
444 )
445 all_copies[current_rev] = current_copies
446
447 # filter out internal details and return a {dest: source mapping}
448 final_copies = {}
449 for dest, (tt, source) in all_copies[targetrev].items():
450 if source is not None:
451 final_copies[dest] = source
445 # filter out internal details and return a {dest: source mapping}
446 final_copies = {}
447 for dest, (tt, source) in all_copies[targetrev].items():
448 if source is not None:
449 final_copies[dest] = source
452 450 return final_copies
453 451
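# Copy-on-write sketch for the loop above (toy data): a child first aliases
# its parent's mapping and clones it only on the first real mutation, so
# siblings that still need the parent's data keep an unmodified view:
#
#     copies = {b'b': (3, b'a')}    # parent data, possibly shared
#     newcopies = copies            # alias: nothing written yet
#     newcopies = copies.copy()     # first write triggers the clone
#     newcopies[b'c'] = (7, b'a')
#     assert copies == {b'b': (3, b'a')}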
454 452
455 453 def _merge_copies_dict(minor, major, isancestor, changes):
456 454 """merge two copies-mapping together, minor and major
457 455
458 456 In case of conflict, value from "major" will be picked.
459 457
460 458 - `isancestors(low_rev, high_rev)`: callable returning True if `low_rev` is
461 459 an ancestor of `high_rev`,
462 460 
463 461 - `ismerged(path)`: callable returning True if `path` has been merged in the
464 462 current revision,
465 463
466 464 return the resulting dict (in practice, the "minor" object, updated)
467 465 """
468 466 for dest, value in major.items():
469 467 other = minor.get(dest)
470 468 if other is None:
471 469 minor[dest] = value
472 470 else:
473 471 new_tt = value[0]
474 472 other_tt = other[0]
475 473 if value[1] == other[1]:
476 474 continue
477 475 # content from "major" wins, unless it is older
478 476 # than the branch point or there is a merge
479 477 if new_tt == other_tt:
480 478 minor[dest] = value
481 479 elif (
482 480 changes is not None
483 481 and value[1] is None
484 482 and dest in changes.salvaged
485 483 ):
486 484 pass
487 485 elif (
488 486 changes is not None
489 487 and other[1] is None
490 488 and dest in changes.salvaged
491 489 ):
492 490 minor[dest] = value
493 491 elif changes is not None and dest in changes.merged:
494 492 minor[dest] = value
495 493 elif not isancestor(new_tt, other_tt):
496 494 if value[1] is not None:
497 495 minor[dest] = value
498 496 elif isancestor(other_tt, new_tt):
499 497 minor[dest] = value
500 498 return minor
501 499
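# A minimal sketch (toy revision numbers, isancestor stubbed) of the
# precedence rule above: for a conflicting destination, the "major" entry
# wins when its revision is not an ancestor of the "minor" one:
#
#     minor = {b'd': (2, b'old')}
#     major = {b'd': (5, b'new')}
#     _merge_copies_dict(minor, major, lambda anc, desc: anc <= desc, None)
#     # 5 is not an ancestor of 2 and b'new' is a real source, so:
#     assert minor == {b'd': (5, b'new')}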
502 500
503 501 def _revinfo_getter_extra(repo):
504 502 """return a function that returns multiple data given a <rev>:
505 503
506 504 * p1: revision number of first parent
507 505 * p2: revision number of second parent
508 506 * p1copies: mapping of copies from p1
509 507 * p2copies: mapping of copies from p2
510 508 * removed: a list of removed files
511 509 * ismerged: a callback to know if file was merged in that revision
512 510 """
513 511 cl = repo.changelog
514 512 parents = cl.parentrevs
515 513
516 514 def get_ismerged(rev):
517 515 ctx = repo[rev]
518 516
519 517 def ismerged(path):
520 518 if path not in ctx.files():
521 519 return False
522 520 fctx = ctx[path]
523 521 parents = fctx._filelog.parents(fctx._filenode)
524 522 nb_parents = 0
525 523 for n in parents:
526 524 if n != nullid:
527 525 nb_parents += 1
528 526 return nb_parents >= 2
529 527
530 528 return ismerged
531 529
532 530 def revinfo(rev):
533 531 p1, p2 = parents(rev)
534 532 ctx = repo[rev]
535 533 p1copies, p2copies = ctx._copies
536 534 removed = ctx.filesremoved()
537 535 return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
538 536
539 537 return revinfo
540 538
541 539
542 540 def _combine_changeset_copies_extra(
543 541 revs, children, targetrev, revinfo, match, isancestor
544 542 ):
545 543 """version of `_combine_changeset_copies` that works with the Google
546 544 specific "extra" based storage for copy information"""
547 545 all_copies = {}
548 546 alwaysmatch = match.always()
549 547 for r in revs:
550 548 copies = all_copies.pop(r, None)
551 549 if copies is None:
552 550 # this is a root
553 551 copies = {}
554 552 for i, c in enumerate(children[r]):
555 553 p1, p2, p1copies, p2copies, removed, ismerged = revinfo(c)
556 554 if r == p1:
557 555 parent = 1
558 556 childcopies = p1copies
559 557 else:
560 558 assert r == p2
561 559 parent = 2
562 560 childcopies = p2copies
563 561 if not alwaysmatch:
564 562 childcopies = {
565 563 dst: src for dst, src in childcopies.items() if match(dst)
566 564 }
567 565 newcopies = copies
568 566 if childcopies:
569 567 newcopies = copies.copy()
570 568 for dest, source in pycompat.iteritems(childcopies):
571 569 prev = copies.get(source)
572 570 if prev is not None and prev[1] is not None:
573 571 source = prev[1]
574 572 newcopies[dest] = (c, source)
575 573 assert newcopies is not copies
576 574 for f in removed:
577 575 if f in newcopies:
578 576 if newcopies is copies:
579 577 # copy on write to avoid affecting potential other
580 578 # branches. when there are no other branches, this
581 579 # could be avoided.
582 580 newcopies = copies.copy()
583 581 newcopies[f] = (c, None)
584 582 othercopies = all_copies.get(c)
585 583 if othercopies is None:
586 584 all_copies[c] = newcopies
587 585 else:
588 586 # we are the second parent to work on c, so we need to merge our
589 587 # work with the other.
590 588 #
591 589 # In case of conflict, parent 1 takes precedence over parent 2.
592 590 # This is an arbitrary choice made anew when implementing
593 591 # changeset based copies. It was made without regard to
594 592 # potential filelog-related behavior.
595 593 if parent == 1:
596 594 _merge_copies_dict_extra(
597 595 othercopies, newcopies, isancestor, ismerged
598 596 )
599 597 else:
600 598 _merge_copies_dict_extra(
601 599 newcopies, othercopies, isancestor, ismerged
602 600 )
603 601 all_copies[c] = newcopies
604 602
605 603 final_copies = {}
606 604 for dest, (tt, source) in all_copies[targetrev].items():
607 605 if source is not None:
608 606 final_copies[dest] = source
609 607 return final_copies
610 608
611 609
612 610 def _merge_copies_dict_extra(minor, major, isancestor, ismerged):
613 611 """version of `_merge_copies_dict` that works with the Google
614 612 specific "extra" based storage for copy information"""
615 613 for dest, value in major.items():
616 614 other = minor.get(dest)
617 615 if other is None:
618 616 minor[dest] = value
619 617 else:
620 618 new_tt = value[0]
621 619 other_tt = other[0]
622 620 if value[1] == other[1]:
623 621 continue
624 622 # content from "major" wins, unless it is older
625 623 # than the branch point or there is a merge
626 624 if (
627 625 new_tt == other_tt
628 626 or not isancestor(new_tt, other_tt)
629 627 or ismerged(dest)
630 628 ):
631 629 minor[dest] = value
632 630
633 631
634 632 def _forwardcopies(a, b, base=None, match=None):
635 633 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
636 634
637 635 if base is None:
638 636 base = a
639 637 match = a.repo().narrowmatch(match)
640 638 # check for working copy
641 639 if b.rev() is None:
642 640 cm = _committedforwardcopies(a, b.p1(), base, match)
643 641 # combine copies from dirstate if necessary
644 642 copies = _chain(cm, _dirstatecopies(b._repo, match))
645 643 else:
646 644 copies = _committedforwardcopies(a, b, base, match)
647 645 return copies
648 646
649 647
650 648 def _backwardrenames(a, b, match):
651 649 if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
652 650 return {}
653 651
654 652 # Even though we're not taking copies into account, 1:n rename situations
655 653 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
656 654 # arbitrarily pick one of the renames.
657 655 # We don't want to pass in "match" here, since that would filter
658 656 # the destination by it. Since we're reversing the copies, we want
659 657 # to filter the source instead.
660 658 f = _forwardcopies(b, a)
661 659 r = {}
662 660 for k, v in sorted(pycompat.iteritems(f)):
663 661 if match and not match(v):
664 662 continue
665 663 # remove copies
666 664 if v in a:
667 665 continue
668 666 r[v] = k
669 667 return r
670 668
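# Sketch (hypothetical paths): renames are discovered forward from b to a
# and then inverted; an entry whose source still exists in a is a copy, not
# a rename, and is skipped:
#
#     f = _forwardcopies(b, a)    # e.g. {b'new': b'old'}
#     # when b'old' is not in a, the result is r == {b'old': b'new'}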
671 669
672 670 def pathcopies(x, y, match=None):
673 671 """find {dst@y: src@x} copy mapping for directed compare"""
674 672 repo = x._repo
675 673 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
676 674 if debug:
677 675 repo.ui.debug(
678 676 b'debug.copies: searching copies from %s to %s\n' % (x, y)
679 677 )
680 678 if x == y or not x or not y:
681 679 return {}
682 680 if y.rev() is None and x == y.p1():
683 681 if debug:
684 682 repo.ui.debug(b'debug.copies: search mode: dirstate\n')
685 683 # short-circuit to avoid issues with merge states
686 684 return _dirstatecopies(repo, match)
687 685 a = y.ancestor(x)
688 686 if a == x:
689 687 if debug:
690 688 repo.ui.debug(b'debug.copies: search mode: forward\n')
691 689 copies = _forwardcopies(x, y, match=match)
692 690 elif a == y:
693 691 if debug:
694 692 repo.ui.debug(b'debug.copies: search mode: backward\n')
695 693 copies = _backwardrenames(x, y, match=match)
696 694 else:
697 695 if debug:
698 696 repo.ui.debug(b'debug.copies: search mode: combined\n')
699 697 base = None
700 698 if a.rev() != nullrev:
701 699 base = x
702 700 copies = _chain(
703 701 _backwardrenames(x, a, match=match),
704 702 _forwardcopies(a, y, base, match=match),
705 703 )
706 704 _filter(x, y, copies)
707 705 return copies
708 706
709 707
710 708 def mergecopies(repo, c1, c2, base):
711 709 """
712 710 Finds moves and copies between context c1 and c2 that are relevant for
713 711 merging. 'base' will be used as the merge base.
714 712
715 713 Copytracing is used in commands like rebase, merge, unshelve, etc. to merge
716 714 files that were moved/copied in one merge parent and modified in another.
717 715 For example:
718 716
719 717 o ---> 4 another commit
720 718 |
721 719 | o ---> 3 commit that modifies a.txt
722 720 | /
723 721 o / ---> 2 commit that moves a.txt to b.txt
724 722 |/
725 723 o ---> 1 merge base
726 724
727 725 If we try to rebase revision 3 on revision 4, since there is no a.txt in
728 726 revision 4, and if the user has copytrace disabled, we print the following
729 727 message:
730 728
731 729 ```other changed <file> which local deleted```
732 730
733 731 Returns a tuple where:
734 732
735 733 "branch_copies" is an instance of branch_copies.
736 734
737 735 "diverge" is a mapping of source name -> list of destination names
738 736 for divergent renames.
739 737
740 738 This function calls different copytracing algorithms based on config.
741 739 """
742 740 # avoid silly behavior for update from empty dir
743 741 if not c1 or not c2 or c1 == c2:
744 742 return branch_copies(), branch_copies(), {}
745 743
746 744 narrowmatch = c1.repo().narrowmatch()
747 745
748 746 # avoid silly behavior for parent -> working dir
749 747 if c2.node() is None and c1.node() == repo.dirstate.p1():
750 748 return (
751 749 branch_copies(_dirstatecopies(repo, narrowmatch)),
752 750 branch_copies(),
753 751 {},
754 752 )
755 753
756 754 copytracing = repo.ui.config(b'experimental', b'copytrace')
757 755 if stringutil.parsebool(copytracing) is False:
758 756 # stringutil.parsebool() returns None when it is unable to parse the
759 757 # value, so we only treat an explicit False as disabling copytracing
760 758 return branch_copies(), branch_copies(), {}
761 759
762 760 if usechangesetcentricalgo(repo):
763 761 # The heuristics don't make sense when we need changeset-centric algos
764 762 return _fullcopytracing(repo, c1, c2, base)
765 763
766 764 # Copy trace disabling is explicitly below the node == p1 logic above
767 765 # because the logic above is required for a simple copy to be kept across a
768 766 # rebase.
769 767 if copytracing == b'heuristics':
770 768 # Do full copytracing if only non-public revisions are involved as
771 769 # that will be fast enough and will also cover the copies which could
772 770 # be missed by heuristics
773 771 if _isfullcopytraceable(repo, c1, base):
774 772 return _fullcopytracing(repo, c1, c2, base)
775 773 return _heuristicscopytracing(repo, c1, c2, base)
776 774 else:
777 775 return _fullcopytracing(repo, c1, c2, base)
778 776
779 777
780 778 def _isfullcopytraceable(repo, c1, base):
781 779 """Checks whether base, source and destination are all non-public branches;
782 780 if so, use the full copytrace algorithm for increased capabilities,
783 781 since it will be fast enough.
784 782 
785 783 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
786 784 the number of changesets from c1 to base such that if the number of
787 785 changesets is more than the limit, the full copytracing algorithm won't be used.
788 786 """
789 787 if c1.rev() is None:
790 788 c1 = c1.p1()
791 789 if c1.mutable() and base.mutable():
792 790 sourcecommitlimit = repo.ui.configint(
793 791 b'experimental', b'copytrace.sourcecommitlimit'
794 792 )
795 793 commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
796 794 return commits < sourcecommitlimit
797 795 return False
798 796
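# Sketch of the limit read above (hgrc syntax; the option name comes from
# the code, the value is arbitrary):
#
#     [experimental]
#     copytrace.sourcecommitlimit = 100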
799 797
800 798 def _checksinglesidecopies(
801 799 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
802 800 ):
803 801 if src not in m2:
804 802 # deleted on side 2
805 803 if src not in m1:
806 804 # renamed on side 1, deleted on side 2
807 805 renamedelete[src] = dsts1
808 806 elif src not in mb:
809 807 # Work around the "short-circuit to avoid issues with merge states"
810 808 # thing in pathcopies(): pathcopies(x, y) can return a copy where the
811 809 # destination doesn't exist in y.
812 810 pass
813 811 elif mb[src] != m2[src] and not _related(c2[src], base[src]):
814 812 return
815 813 elif mb[src] != m2[src] or mb.flags(src) != m2.flags(src):
816 814 # modified on side 2
817 815 for dst in dsts1:
818 816 copy[dst] = src
819 817
820 818
821 819 class branch_copies(object):
822 820 """Information about copies made on one side of a merge/graft.
823 821
824 822 "copy" is a mapping from destination name -> source name,
825 823 where source is in c1 and destination is in c2 or vice-versa.
826 824
827 825 "movewithdir" is a mapping from source name -> destination name,
828 826 where the file at source, present in one context but not the other,
829 827 needs to be moved to destination by the merge process, because the
830 828 other context moved the directory it is in.
831 829
832 830 "renamedelete" is a mapping of source name -> list of destination
833 831 names for files deleted in c1 that were renamed in c2 or vice-versa.
834 832
835 833 "dirmove" is a mapping of detected source dir -> destination dir renames.
836 834 This is needed for handling changes to new files previously grafted into
837 835 renamed directories.
838 836 """
839 837
840 838 def __init__(
841 839 self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
842 840 ):
843 841 self.copy = {} if copy is None else copy
844 842 self.renamedelete = {} if renamedelete is None else renamedelete
845 843 self.dirmove = {} if dirmove is None else dirmove
846 844 self.movewithdir = {} if movewithdir is None else movewithdir
847 845
848 846 def __repr__(self):
849 847 return '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>' % (
850 848 self.copy,
851 849 self.renamedelete,
852 850 self.dirmove,
853 851 self.movewithdir,
854 852 )
855 853
856 854
857 855 def _fullcopytracing(repo, c1, c2, base):
858 856 """The full copytracing algorithm, which finds all the new files that were
859 857 added from the merge base up to the top commit and, for each file, checks
860 858 if it was copied from another file.
861 859
862 860 This is pretty slow when a lot of changesets are involved but will track all
863 861 the copies.
864 862 """
865 863 m1 = c1.manifest()
866 864 m2 = c2.manifest()
867 865 mb = base.manifest()
868 866
869 867 copies1 = pathcopies(base, c1)
870 868 copies2 = pathcopies(base, c2)
871 869
872 870 if not (copies1 or copies2):
873 871 return branch_copies(), branch_copies(), {}
874 872
875 873 inversecopies1 = {}
876 874 inversecopies2 = {}
877 875 for dst, src in copies1.items():
878 876 inversecopies1.setdefault(src, []).append(dst)
879 877 for dst, src in copies2.items():
880 878 inversecopies2.setdefault(src, []).append(dst)
881 879
882 880 copy1 = {}
883 881 copy2 = {}
884 882 diverge = {}
885 883 renamedelete1 = {}
886 884 renamedelete2 = {}
887 885 allsources = set(inversecopies1) | set(inversecopies2)
888 886 for src in allsources:
889 887 dsts1 = inversecopies1.get(src)
890 888 dsts2 = inversecopies2.get(src)
891 889 if dsts1 and dsts2:
892 890 # copied/renamed on both sides
893 891 if src not in m1 and src not in m2:
894 892 # renamed on both sides
895 893 dsts1 = set(dsts1)
896 894 dsts2 = set(dsts2)
897 895 # If there's some overlap in the rename destinations, we
898 896 # consider it not divergent. For example, if side 1 copies 'a' to
899 897 # 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c' and 'd'
900 898 # and deletes 'a', then the overlapping 'c' is recorded as a copy.
901 899 if dsts1 & dsts2:
902 900 for dst in dsts1 & dsts2:
903 901 copy1[dst] = src
904 902 copy2[dst] = src
905 903 else:
906 904 diverge[src] = sorted(dsts1 | dsts2)
907 905 elif src in m1 and src in m2:
908 906 # copied on both sides
909 907 dsts1 = set(dsts1)
910 908 dsts2 = set(dsts2)
911 909 for dst in dsts1 & dsts2:
912 910 copy1[dst] = src
913 911 copy2[dst] = src
914 912 # TODO: Handle cases where it was renamed on one side and copied
915 913 # on the other side
916 914 elif dsts1:
917 915 # copied/renamed only on side 1
918 916 _checksinglesidecopies(
919 917 src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
920 918 )
921 919 elif dsts2:
922 920 # copied/renamed only on side 2
923 921 _checksinglesidecopies(
924 922 src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
925 923 )
926 924
927 925 # find interesting file sets from manifests
928 926 cache = []
929 927
930 928 def _get_addedfiles(idx):
931 929 if not cache:
932 930 addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
933 931 addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
934 932 u1 = sorted(addedinm1 - addedinm2)
935 933 u2 = sorted(addedinm2 - addedinm1)
936 934 cache.extend((u1, u2))
937 935 return cache[idx]
938 936
939 937 u1fn = lambda: _get_addedfiles(0)
940 938 u2fn = lambda: _get_addedfiles(1)
941 939 if repo.ui.debugflag:
942 940 u1 = u1fn()
943 941 u2 = u2fn()
944 942
945 943 header = b" unmatched files in %s"
946 944 if u1:
947 945 repo.ui.debug(
948 946 b"%s:\n %s\n" % (header % b'local', b"\n ".join(u1))
949 947 )
950 948 if u2:
951 949 repo.ui.debug(
952 950 b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2))
953 951 )
954 952
955 953 renamedeleteset = set()
956 954 divergeset = set()
957 955 for dsts in diverge.values():
958 956 divergeset.update(dsts)
959 957 for dsts in renamedelete1.values():
960 958 renamedeleteset.update(dsts)
961 959 for dsts in renamedelete2.values():
962 960 renamedeleteset.update(dsts)
963 961
964 962 repo.ui.debug(
965 963 b" all copies found (* = to merge, ! = divergent, "
966 964 b"% = renamed and deleted):\n"
967 965 )
968 966 for side, copies in ((b"local", copies1), (b"remote", copies2)):
969 967 if not copies:
970 968 continue
971 969 repo.ui.debug(b" on %s side:\n" % side)
972 970 for f in sorted(copies):
973 971 note = b""
974 972 if f in copy1 or f in copy2:
975 973 note += b"*"
976 974 if f in divergeset:
977 975 note += b"!"
978 976 if f in renamedeleteset:
979 977 note += b"%"
980 978 repo.ui.debug(
981 979 b" src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
982 980 )
983 981 del renamedeleteset
984 982 del divergeset
985 983
986 984 repo.ui.debug(b" checking for directory renames\n")
987 985
988 986 dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2fn)
989 987 dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1fn)
990 988
991 989 branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
992 990 branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)
993 991
994 992 return branch_copies1, branch_copies2, diverge
995 993
996 994
997 995 def _dir_renames(repo, ctx, copy, fullcopy, addedfilesfn):
998 996 """Finds moved directories and files that should move with them.
999 997
1000 998 ctx: the context for one of the sides
1001 999 copy: files copied on the same side (as ctx)
1002 1000 fullcopy: files copied on the same side (as ctx), including those that
1003 1001 merge.manifestmerge() won't care about
1004 1002 addedfilesfn: function returning added files on the other side (compared to
1005 1003 ctx)
1006 1004 """
1007 1005 # generate a directory move map
1008 1006 invalid = set()
1009 1007 dirmove = {}
1010 1008
1011 1009 # examine each file copy for a potential directory move, which is
1012 1010 # when all the files in a directory are moved to a new directory
1013 1011 for dst, src in pycompat.iteritems(fullcopy):
1014 1012 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
1015 1013 if dsrc in invalid:
1016 1014 # already seen to be uninteresting
1017 1015 continue
1018 1016 elif ctx.hasdir(dsrc) and ctx.hasdir(ddst):
1019 1017 # directory wasn't entirely moved locally
1020 1018 invalid.add(dsrc)
1021 1019 elif dsrc in dirmove and dirmove[dsrc] != ddst:
1022 1020 # files from the same directory moved to two different places
1023 1021 invalid.add(dsrc)
1024 1022 else:
1025 1023 # looks good so far
1026 1024 dirmove[dsrc] = ddst
1027 1025
1028 1026 for i in invalid:
1029 1027 if i in dirmove:
1030 1028 del dirmove[i]
1031 1029 del invalid
1032 1030
1033 1031 if not dirmove:
1034 1032 return {}, {}
1035 1033
1036 1034 dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
1037 1035
1038 1036 for d in dirmove:
1039 1037 repo.ui.debug(
1040 1038 b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
1041 1039 )
1042 1040
1043 1041 movewithdir = {}
1044 1042 # check unaccounted nonoverlapping files against directory moves
1045 1043 for f in addedfilesfn():
1046 1044 if f not in fullcopy:
1047 1045 for d in dirmove:
1048 1046 if f.startswith(d):
1049 1047 # new file added in a directory that was moved, move it
1050 1048 df = dirmove[d] + f[len(d) :]
1051 1049 if df not in copy:
1052 1050 movewithdir[f] = df
1053 1051 repo.ui.debug(
1054 1052 b" pending file src: '%s' -> dst: '%s'\n"
1055 1053 % (f, df)
1056 1054 )
1057 1055 break
1058 1056
1059 1057 return dirmove, movewithdir
1060 1058
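# Toy example (hypothetical paths) for the directory-move detection above:
# when every copy out of 'a/' lands in 'b/' and 'a/' no longer exists in
# ctx, the directory rename is recorded and new files are carried along:
#
#     fullcopy = {b'b/x': b'a/x', b'b/y': b'a/y'}
#     # -> dirmove == {b'a/': b'b/'}
#     # a new file b'a/z' on the other side then yields
#     # movewithdir == {b'a/z': b'b/z'}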
1061 1059
1062 1060 def _heuristicscopytracing(repo, c1, c2, base):
1063 1061 """Fast copytracing using filename heuristics
1064 1062
1065 1063 Assumes that moves or renames are of the following two types:
1066 1064
1067 1065 1) Inside a directory only (same directory name but different filenames)
1068 1066 2) Move from one directory to another
1069 1067 (same filenames but different directory names)
1070 1068
1071 1069 Works only when there are no merge commits in the "source branch".
1072 1070 The source branch is the commits from base up to c2, not including base.
1073 1071 
1074 1072 If a merge is involved it falls back to _fullcopytracing().
1075 1073
1076 1074 Can be used by setting the following config:
1077 1075
1078 1076 [experimental]
1079 1077 copytrace = heuristics
1080 1078
1081 1079 In some cases the copy/move candidates found by the heuristics can be very
1082 1080 numerous, which makes the algorithm slow. The number of possible
1083 1081 candidates to check can be limited with the config option
1084 1082 `experimental.copytrace.movecandidateslimit`, which defaults to 100.
1085 1083 """
1086 1084
1087 1085 if c1.rev() is None:
1088 1086 c1 = c1.p1()
1089 1087 if c2.rev() is None:
1090 1088 c2 = c2.p1()
1091 1089
1092 1090 changedfiles = set()
1093 1091 m1 = c1.manifest()
1094 1092 if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
1095 1093 # If base is not in c2 branch, we switch to fullcopytracing
1096 1094 repo.ui.debug(
1097 1095 b"switching to full copytracing as base is not "
1098 1096 b"an ancestor of c2\n"
1099 1097 )
1100 1098 return _fullcopytracing(repo, c1, c2, base)
1101 1099
1102 1100 ctx = c2
1103 1101 while ctx != base:
1104 1102 if len(ctx.parents()) == 2:
1105 1103 # To keep things simple let's not handle merges
1106 1104 repo.ui.debug(b"switching to full copytracing because of merges\n")
1107 1105 return _fullcopytracing(repo, c1, c2, base)
1108 1106 changedfiles.update(ctx.files())
1109 1107 ctx = ctx.p1()
1110 1108
1111 1109 copies2 = {}
1112 1110 cp = _forwardcopies(base, c2)
1113 1111 for dst, src in pycompat.iteritems(cp):
1114 1112 if src in m1:
1115 1113 copies2[dst] = src
1116 1114
1117 1115 # file is missing if it isn't present in the destination, but is present in
1118 1116 # the base and present in the source.
1119 1117 # Presence in the base is important to exclude added files, presence in the
1120 1118 # source is important to exclude removed files.
1121 1119 filt = lambda f: f not in m1 and f in base and f in c2
1122 1120 missingfiles = [f for f in changedfiles if filt(f)]
1123 1121
1124 1122 copies1 = {}
1125 1123 if missingfiles:
1126 1124 basenametofilename = collections.defaultdict(list)
1127 1125 dirnametofilename = collections.defaultdict(list)
1128 1126
1129 1127 for f in m1.filesnotin(base.manifest()):
1130 1128 basename = os.path.basename(f)
1131 1129 dirname = os.path.dirname(f)
1132 1130 basenametofilename[basename].append(f)
1133 1131 dirnametofilename[dirname].append(f)
1134 1132
1135 1133 for f in missingfiles:
1136 1134 basename = os.path.basename(f)
1137 1135 dirname = os.path.dirname(f)
1138 1136 samebasename = basenametofilename[basename]
1139 1137 samedirname = dirnametofilename[dirname]
1140 1138 movecandidates = samebasename + samedirname
1141 1139 # f is guaranteed to be present in c2, that's why
1142 1140 # c2.filectx(f) won't fail
1143 1141 f2 = c2.filectx(f)
1144 1142 # we can have a lot of candidates which can slow down the heuristics
1145 1143 # config value to limit the number of candidates moves to check
1146 1144 maxcandidates = repo.ui.configint(
1147 1145 b'experimental', b'copytrace.movecandidateslimit'
1148 1146 )
1149 1147
1150 1148 if len(movecandidates) > maxcandidates:
1151 1149 repo.ui.status(
1152 1150 _(
1153 1151 b"skipping copytracing for '%s', more "
1154 1152 b"candidates than the limit: %d\n"
1155 1153 )
1156 1154 % (f, len(movecandidates))
1157 1155 )
1158 1156 continue
1159 1157
1160 1158 for candidate in movecandidates:
1161 1159 f1 = c1.filectx(candidate)
1162 1160 if _related(f1, f2):
1163 1161 # if there are a few related copies then we'll merge
1164 1162 # changes into all of them. This matches the behaviour
1165 1163 # of upstream copytracing
1166 1164 copies1[candidate] = f
1167 1165
1168 1166 return branch_copies(copies1), branch_copies(copies2), {}
1169 1167
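# Heuristic sketch (hypothetical paths): a file missing from c1 is matched
# against files newly added in c1 that share its basename or its directory,
# and each candidate must still pass the _related() ancestry check:
#
#     f = b'src/util.py'    # present in base and c2, missing in c1
#     movecandidates = (
#         basenametofilename[b'util.py'] + dirnametofilename[b'src']
#     )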
1170 1168
1171 1169 def _related(f1, f2):
1172 1170 """return True if f1 and f2 filectx have a common ancestor
1173 1171
1174 1172 Walk back to common ancestor to see if the two files originate
1175 1173 from the same file. Since workingfilectx's rev() is None it messes
1176 1174 up the integer comparison logic, hence the pre-step check for
1177 1175 None (f1 and f2 can only be workingfilectx's initially).
1178 1176 """
1179 1177
1180 1178 if f1 == f2:
1181 1179 return True # a match
1182 1180
1183 1181 g1, g2 = f1.ancestors(), f2.ancestors()
1184 1182 try:
1185 1183 f1r, f2r = f1.linkrev(), f2.linkrev()
1186 1184
1187 1185 if f1r is None:
1188 1186 f1 = next(g1)
1189 1187 if f2r is None:
1190 1188 f2 = next(g2)
1191 1189
1192 1190 while True:
1193 1191 f1r, f2r = f1.linkrev(), f2.linkrev()
1194 1192 if f1r > f2r:
1195 1193 f1 = next(g1)
1196 1194 elif f2r > f1r:
1197 1195 f2 = next(g2)
1198 1196 else: # f1 and f2 point to files in the same linkrev
1199 1197 return f1 == f2 # true if they point to the same file
1200 1198 except StopIteration:
1201 1199 return False
1202 1200
1203 1201
1204 1202 def graftcopies(wctx, ctx, base):
1205 1203 """reproduce copies between base and ctx in the wctx
1206 1204
1207 1205 Unlike mergecopies(), this function will only consider copies between base
1208 1206 and ctx; it will ignore copies between base and wctx. Also unlike
1209 1207 mergecopies(), this function will apply copies to the working copy (instead
1210 1208 of just returning information about the copies). That makes it cheaper
1211 1209 (especially in the common case of base==ctx.p1()) and useful also when
1212 1210 experimental.copytrace=off.
1213 1211
1214 1212 merge.update() will have already marked most copies, but it will only
1215 1213 mark copies if it thinks the source files are related (see
1216 1214 merge._related()). It will also not mark copies if the file wasn't modified
1217 1215 on the local side. This function adds the copies that were "missed"
1218 1216 by merge.update().
1219 1217 """
1220 1218 new_copies = pathcopies(base, ctx)
1221 1219 _filter(wctx.p1(), wctx, new_copies)
1222 1220 for dst, src in pycompat.iteritems(new_copies):
1223 1221 wctx[dst].markcopied(src)