##// END OF EJS Templates
copies: process files in deterministic order for stable tests...
Martin von Zweigbergk -
r42396:390ec72b default
parent child Browse files
Show More
@@ -1,1018 +1,1018 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import heapq
11 import heapq
12 import os
12 import os
13
13
14 from .i18n import _
14 from .i18n import _
15
15
16 from . import (
16 from . import (
17 match as matchmod,
17 match as matchmod,
18 node,
18 node,
19 pathutil,
19 pathutil,
20 util,
20 util,
21 )
21 )
22 from .utils import (
22 from .utils import (
23 stringutil,
23 stringutil,
24 )
24 )
25
25
26 def _findlimit(repo, ctxa, ctxb):
26 def _findlimit(repo, ctxa, ctxb):
27 """
27 """
28 Find the last revision that needs to be checked to ensure that a full
28 Find the last revision that needs to be checked to ensure that a full
29 transitive closure for file copies can be properly calculated.
29 transitive closure for file copies can be properly calculated.
30 Generally, this means finding the earliest revision number that's an
30 Generally, this means finding the earliest revision number that's an
31 ancestor of a or b but not both, except when a or b is a direct descendent
31 ancestor of a or b but not both, except when a or b is a direct descendent
32 of the other, in which case we can return the minimum revnum of a and b.
32 of the other, in which case we can return the minimum revnum of a and b.
33 """
33 """
34
34
35 # basic idea:
35 # basic idea:
36 # - mark a and b with different sides
36 # - mark a and b with different sides
37 # - if a parent's children are all on the same side, the parent is
37 # - if a parent's children are all on the same side, the parent is
38 # on that side, otherwise it is on no side
38 # on that side, otherwise it is on no side
39 # - walk the graph in topological order with the help of a heap;
39 # - walk the graph in topological order with the help of a heap;
40 # - add unseen parents to side map
40 # - add unseen parents to side map
41 # - clear side of any parent that has children on different sides
41 # - clear side of any parent that has children on different sides
42 # - track number of interesting revs that might still be on a side
42 # - track number of interesting revs that might still be on a side
43 # - track the lowest interesting rev seen
43 # - track the lowest interesting rev seen
44 # - quit when interesting revs is zero
44 # - quit when interesting revs is zero
45
45
46 cl = repo.changelog
46 cl = repo.changelog
47 wdirparents = None
47 wdirparents = None
48 a = ctxa.rev()
48 a = ctxa.rev()
49 b = ctxb.rev()
49 b = ctxb.rev()
50 if a is None:
50 if a is None:
51 wdirparents = (ctxa.p1(), ctxa.p2())
51 wdirparents = (ctxa.p1(), ctxa.p2())
52 a = node.wdirrev
52 a = node.wdirrev
53 if b is None:
53 if b is None:
54 assert not wdirparents
54 assert not wdirparents
55 wdirparents = (ctxb.p1(), ctxb.p2())
55 wdirparents = (ctxb.p1(), ctxb.p2())
56 b = node.wdirrev
56 b = node.wdirrev
57
57
58 side = {a: -1, b: 1}
58 side = {a: -1, b: 1}
59 visit = [-a, -b]
59 visit = [-a, -b]
60 heapq.heapify(visit)
60 heapq.heapify(visit)
61 interesting = len(visit)
61 interesting = len(visit)
62 limit = node.wdirrev
62 limit = node.wdirrev
63
63
64 while interesting:
64 while interesting:
65 r = -heapq.heappop(visit)
65 r = -heapq.heappop(visit)
66 if r == node.wdirrev:
66 if r == node.wdirrev:
67 parents = [pctx.rev() for pctx in wdirparents]
67 parents = [pctx.rev() for pctx in wdirparents]
68 else:
68 else:
69 parents = cl.parentrevs(r)
69 parents = cl.parentrevs(r)
70 if parents[1] == node.nullrev:
70 if parents[1] == node.nullrev:
71 parents = parents[:1]
71 parents = parents[:1]
72 for p in parents:
72 for p in parents:
73 if p not in side:
73 if p not in side:
74 # first time we see p; add it to visit
74 # first time we see p; add it to visit
75 side[p] = side[r]
75 side[p] = side[r]
76 if side[p]:
76 if side[p]:
77 interesting += 1
77 interesting += 1
78 heapq.heappush(visit, -p)
78 heapq.heappush(visit, -p)
79 elif side[p] and side[p] != side[r]:
79 elif side[p] and side[p] != side[r]:
80 # p was interesting but now we know better
80 # p was interesting but now we know better
81 side[p] = 0
81 side[p] = 0
82 interesting -= 1
82 interesting -= 1
83 if side[r]:
83 if side[r]:
84 limit = r # lowest rev visited
84 limit = r # lowest rev visited
85 interesting -= 1
85 interesting -= 1
86
86
87 # Consider the following flow (see test-commit-amend.t under issue4405):
87 # Consider the following flow (see test-commit-amend.t under issue4405):
88 # 1/ File 'a0' committed
88 # 1/ File 'a0' committed
89 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
89 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
90 # 3/ Move back to first commit
90 # 3/ Move back to first commit
91 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
91 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
92 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
92 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
93 #
93 #
94 # During the amend in step five, we will be in this state:
94 # During the amend in step five, we will be in this state:
95 #
95 #
96 # @ 3 temporary amend commit for a1-amend
96 # @ 3 temporary amend commit for a1-amend
97 # |
97 # |
98 # o 2 a1-amend
98 # o 2 a1-amend
99 # |
99 # |
100 # | o 1 a1
100 # | o 1 a1
101 # |/
101 # |/
102 # o 0 a0
102 # o 0 a0
103 #
103 #
104 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
104 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
105 # yet the filelog has the copy information in rev 1 and we will not look
105 # yet the filelog has the copy information in rev 1 and we will not look
106 # back far enough unless we also look at the a and b as candidates.
106 # back far enough unless we also look at the a and b as candidates.
107 # This only occurs when a is a descendent of b or visa-versa.
107 # This only occurs when a is a descendent of b or visa-versa.
108 return min(limit, a, b)
108 return min(limit, a, b)
109
109
110 def _chain(src, dst, a, b):
110 def _chain(src, dst, a, b):
111 """chain two sets of copies a->b"""
111 """chain two sets of copies a->b"""
112 t = a.copy()
112 t = a.copy()
113 for k, v in b.iteritems():
113 for k, v in b.iteritems():
114 if v in t:
114 if v in t:
115 # found a chain
115 # found a chain
116 if t[v] != k:
116 if t[v] != k:
117 # file wasn't renamed back to itself
117 # file wasn't renamed back to itself
118 t[k] = t[v]
118 t[k] = t[v]
119 if v not in dst:
119 if v not in dst:
120 # chain was a rename, not a copy
120 # chain was a rename, not a copy
121 del t[v]
121 del t[v]
122 if v in src:
122 if v in src:
123 # file is a copy of an existing file
123 # file is a copy of an existing file
124 t[k] = v
124 t[k] = v
125
125
126 for k, v in list(t.items()):
126 for k, v in list(t.items()):
127 # remove criss-crossed copies
127 # remove criss-crossed copies
128 if k in src and v in dst:
128 if k in src and v in dst:
129 del t[k]
129 del t[k]
130 # remove copies to files that were then removed
130 # remove copies to files that were then removed
131 elif k not in dst:
131 elif k not in dst:
132 del t[k]
132 del t[k]
133
133
134 return t
134 return t
135
135
136 def _tracefile(fctx, am, limit=node.nullrev):
136 def _tracefile(fctx, am, limit=node.nullrev):
137 """return file context that is the ancestor of fctx present in ancestor
137 """return file context that is the ancestor of fctx present in ancestor
138 manifest am, stopping after the first ancestor lower than limit"""
138 manifest am, stopping after the first ancestor lower than limit"""
139
139
140 for f in fctx.ancestors():
140 for f in fctx.ancestors():
141 if am.get(f.path(), None) == f.filenode():
141 if am.get(f.path(), None) == f.filenode():
142 return f
142 return f
143 if limit >= 0 and not f.isintroducedafter(limit):
143 if limit >= 0 and not f.isintroducedafter(limit):
144 return None
144 return None
145
145
146 def _dirstatecopies(repo, match=None):
146 def _dirstatecopies(repo, match=None):
147 ds = repo.dirstate
147 ds = repo.dirstate
148 c = ds.copies().copy()
148 c = ds.copies().copy()
149 for k in list(c):
149 for k in list(c):
150 if ds[k] not in 'anm' or (match and not match(k)):
150 if ds[k] not in 'anm' or (match and not match(k)):
151 del c[k]
151 del c[k]
152 return c
152 return c
153
153
154 def _computeforwardmissing(a, b, match=None):
154 def _computeforwardmissing(a, b, match=None):
155 """Computes which files are in b but not a.
155 """Computes which files are in b but not a.
156 This is its own function so extensions can easily wrap this call to see what
156 This is its own function so extensions can easily wrap this call to see what
157 files _forwardcopies is about to process.
157 files _forwardcopies is about to process.
158 """
158 """
159 ma = a.manifest()
159 ma = a.manifest()
160 mb = b.manifest()
160 mb = b.manifest()
161 return mb.filesnotin(ma, match=match)
161 return mb.filesnotin(ma, match=match)
162
162
163 def usechangesetcentricalgo(repo):
163 def usechangesetcentricalgo(repo):
164 """Checks if we should use changeset-centric copy algorithms"""
164 """Checks if we should use changeset-centric copy algorithms"""
165 return (repo.ui.config('experimental', 'copies.read-from') in
165 return (repo.ui.config('experimental', 'copies.read-from') in
166 ('changeset-only', 'compatibility'))
166 ('changeset-only', 'compatibility'))
167
167
168 def _committedforwardcopies(a, b, match):
168 def _committedforwardcopies(a, b, match):
169 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
169 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
170 # files might have to be traced back to the fctx parent of the last
170 # files might have to be traced back to the fctx parent of the last
171 # one-side-only changeset, but not further back than that
171 # one-side-only changeset, but not further back than that
172 repo = a._repo
172 repo = a._repo
173
173
174 if usechangesetcentricalgo(repo):
174 if usechangesetcentricalgo(repo):
175 return _changesetforwardcopies(a, b, match)
175 return _changesetforwardcopies(a, b, match)
176
176
177 debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
177 debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
178 dbg = repo.ui.debug
178 dbg = repo.ui.debug
179 if debug:
179 if debug:
180 dbg('debug.copies: looking into rename from %s to %s\n'
180 dbg('debug.copies: looking into rename from %s to %s\n'
181 % (a, b))
181 % (a, b))
182 limit = _findlimit(repo, a, b)
182 limit = _findlimit(repo, a, b)
183 if debug:
183 if debug:
184 dbg('debug.copies: search limit: %d\n' % limit)
184 dbg('debug.copies: search limit: %d\n' % limit)
185 am = a.manifest()
185 am = a.manifest()
186
186
187 # find where new files came from
187 # find where new files came from
188 # we currently don't try to find where old files went, too expensive
188 # we currently don't try to find where old files went, too expensive
189 # this means we can miss a case like 'hg rm b; hg cp a b'
189 # this means we can miss a case like 'hg rm b; hg cp a b'
190 cm = {}
190 cm = {}
191
191
192 # Computing the forward missing is quite expensive on large manifests, since
192 # Computing the forward missing is quite expensive on large manifests, since
193 # it compares the entire manifests. We can optimize it in the common use
193 # it compares the entire manifests. We can optimize it in the common use
194 # case of computing what copies are in a commit versus its parent (like
194 # case of computing what copies are in a commit versus its parent (like
195 # during a rebase or histedit). Note, we exclude merge commits from this
195 # during a rebase or histedit). Note, we exclude merge commits from this
196 # optimization, since the ctx.files() for a merge commit is not correct for
196 # optimization, since the ctx.files() for a merge commit is not correct for
197 # this comparison.
197 # this comparison.
198 forwardmissingmatch = match
198 forwardmissingmatch = match
199 if b.p1() == a and b.p2().node() == node.nullid:
199 if b.p1() == a and b.p2().node() == node.nullid:
200 filesmatcher = matchmod.exact(b.files())
200 filesmatcher = matchmod.exact(b.files())
201 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
201 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
202 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
202 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
203
203
204 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
204 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
205
205
206 if debug:
206 if debug:
207 dbg('debug.copies: missing file to search: %d\n' % len(missing))
207 dbg('debug.copies: missing files to search: %d\n' % len(missing))
208
208
209 for f in missing:
209 for f in sorted(missing):
210 if debug:
210 if debug:
211 dbg('debug.copies: tracing file: %s\n' % f)
211 dbg('debug.copies: tracing file: %s\n' % f)
212 fctx = b[f]
212 fctx = b[f]
213 fctx._ancestrycontext = ancestrycontext
213 fctx._ancestrycontext = ancestrycontext
214
214
215 if debug:
215 if debug:
216 start = util.timer()
216 start = util.timer()
217 ofctx = _tracefile(fctx, am, limit)
217 ofctx = _tracefile(fctx, am, limit)
218 if ofctx:
218 if ofctx:
219 if debug:
219 if debug:
220 dbg('debug.copies: rename of: %s\n' % ofctx._path)
220 dbg('debug.copies: rename of: %s\n' % ofctx._path)
221 cm[f] = ofctx.path()
221 cm[f] = ofctx.path()
222 if debug:
222 if debug:
223 dbg('debug.copies: time: %f seconds\n'
223 dbg('debug.copies: time: %f seconds\n'
224 % (util.timer() - start))
224 % (util.timer() - start))
225 return cm
225 return cm
226
226
227 def _changesetforwardcopies(a, b, match):
227 def _changesetforwardcopies(a, b, match):
228 if a.rev() == node.nullrev:
228 if a.rev() == node.nullrev:
229 return {}
229 return {}
230
230
231 repo = a.repo()
231 repo = a.repo()
232 children = {}
232 children = {}
233 cl = repo.changelog
233 cl = repo.changelog
234 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
234 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
235 for r in missingrevs:
235 for r in missingrevs:
236 for p in cl.parentrevs(r):
236 for p in cl.parentrevs(r):
237 if p == node.nullrev:
237 if p == node.nullrev:
238 continue
238 continue
239 if p not in children:
239 if p not in children:
240 children[p] = [r]
240 children[p] = [r]
241 else:
241 else:
242 children[p].append(r)
242 children[p].append(r)
243
243
244 roots = set(children) - set(missingrevs)
244 roots = set(children) - set(missingrevs)
245 # 'work' contains 3-tuples of a (revision number, parent number, copies).
245 # 'work' contains 3-tuples of a (revision number, parent number, copies).
246 # The parent number is only used for knowing which parent the copies dict
246 # The parent number is only used for knowing which parent the copies dict
247 # came from.
247 # came from.
248 work = [(r, 1, {}) for r in roots]
248 work = [(r, 1, {}) for r in roots]
249 heapq.heapify(work)
249 heapq.heapify(work)
250 while work:
250 while work:
251 r, i1, copies1 = heapq.heappop(work)
251 r, i1, copies1 = heapq.heappop(work)
252 if work and work[0][0] == r:
252 if work and work[0][0] == r:
253 # We are tracing copies from both parents
253 # We are tracing copies from both parents
254 r, i2, copies2 = heapq.heappop(work)
254 r, i2, copies2 = heapq.heappop(work)
255 copies = {}
255 copies = {}
256 ctx = repo[r]
256 ctx = repo[r]
257 p1man, p2man = ctx.p1().manifest(), ctx.p2().manifest()
257 p1man, p2man = ctx.p1().manifest(), ctx.p2().manifest()
258 allcopies = set(copies1) | set(copies2)
258 allcopies = set(copies1) | set(copies2)
259 # TODO: perhaps this filtering should be done as long as ctx
259 # TODO: perhaps this filtering should be done as long as ctx
260 # is merge, whether or not we're tracing from both parent.
260 # is merge, whether or not we're tracing from both parent.
261 for dst in allcopies:
261 for dst in allcopies:
262 if not match(dst):
262 if not match(dst):
263 continue
263 continue
264 if dst not in copies2:
264 if dst not in copies2:
265 # Copied on p1 side: mark as copy from p1 side if it didn't
265 # Copied on p1 side: mark as copy from p1 side if it didn't
266 # already exist on p2 side
266 # already exist on p2 side
267 if dst not in p2man:
267 if dst not in p2man:
268 copies[dst] = copies1[dst]
268 copies[dst] = copies1[dst]
269 elif dst not in copies1:
269 elif dst not in copies1:
270 # Copied on p2 side: mark as copy from p2 side if it didn't
270 # Copied on p2 side: mark as copy from p2 side if it didn't
271 # already exist on p1 side
271 # already exist on p1 side
272 if dst not in p1man:
272 if dst not in p1man:
273 copies[dst] = copies2[dst]
273 copies[dst] = copies2[dst]
274 else:
274 else:
275 # Copied on both sides: mark as copy from p1 side
275 # Copied on both sides: mark as copy from p1 side
276 copies[dst] = copies1[dst]
276 copies[dst] = copies1[dst]
277 else:
277 else:
278 copies = copies1
278 copies = copies1
279 if r == b.rev():
279 if r == b.rev():
280 return copies
280 return copies
281 for c in children[r]:
281 for c in children[r]:
282 childctx = repo[c]
282 childctx = repo[c]
283 if r == childctx.p1().rev():
283 if r == childctx.p1().rev():
284 parent = 1
284 parent = 1
285 childcopies = childctx.p1copies()
285 childcopies = childctx.p1copies()
286 else:
286 else:
287 assert r == childctx.p2().rev()
287 assert r == childctx.p2().rev()
288 parent = 2
288 parent = 2
289 childcopies = childctx.p2copies()
289 childcopies = childctx.p2copies()
290 if not match.always():
290 if not match.always():
291 childcopies = {dst: src for dst, src in childcopies.items()
291 childcopies = {dst: src for dst, src in childcopies.items()
292 if match(dst)}
292 if match(dst)}
293 childcopies = _chain(a, childctx, copies, childcopies)
293 childcopies = _chain(a, childctx, copies, childcopies)
294 heapq.heappush(work, (c, parent, childcopies))
294 heapq.heappush(work, (c, parent, childcopies))
295 assert False
295 assert False
296
296
297 def _forwardcopies(a, b, match=None):
297 def _forwardcopies(a, b, match=None):
298 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
298 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
299
299
300 match = a.repo().narrowmatch(match)
300 match = a.repo().narrowmatch(match)
301 # check for working copy
301 # check for working copy
302 if b.rev() is None:
302 if b.rev() is None:
303 if a == b.p1():
303 if a == b.p1():
304 # short-circuit to avoid issues with merge states
304 # short-circuit to avoid issues with merge states
305 return _dirstatecopies(b._repo, match)
305 return _dirstatecopies(b._repo, match)
306
306
307 cm = _committedforwardcopies(a, b.p1(), match)
307 cm = _committedforwardcopies(a, b.p1(), match)
308 # combine copies from dirstate if necessary
308 # combine copies from dirstate if necessary
309 return _chain(a, b, cm, _dirstatecopies(b._repo, match))
309 return _chain(a, b, cm, _dirstatecopies(b._repo, match))
310 return _committedforwardcopies(a, b, match)
310 return _committedforwardcopies(a, b, match)
311
311
312 def _backwardrenames(a, b, match):
312 def _backwardrenames(a, b, match):
313 if a._repo.ui.config('experimental', 'copytrace') == 'off':
313 if a._repo.ui.config('experimental', 'copytrace') == 'off':
314 return {}
314 return {}
315
315
316 # Even though we're not taking copies into account, 1:n rename situations
316 # Even though we're not taking copies into account, 1:n rename situations
317 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
317 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
318 # arbitrarily pick one of the renames.
318 # arbitrarily pick one of the renames.
319 # We don't want to pass in "match" here, since that would filter
319 # We don't want to pass in "match" here, since that would filter
320 # the destination by it. Since we're reversing the copies, we want
320 # the destination by it. Since we're reversing the copies, we want
321 # to filter the source instead.
321 # to filter the source instead.
322 f = _forwardcopies(b, a)
322 f = _forwardcopies(b, a)
323 r = {}
323 r = {}
324 for k, v in sorted(f.iteritems()):
324 for k, v in sorted(f.iteritems()):
325 if match and not match(v):
325 if match and not match(v):
326 continue
326 continue
327 # remove copies
327 # remove copies
328 if v in a:
328 if v in a:
329 continue
329 continue
330 r[v] = k
330 r[v] = k
331 return r
331 return r
332
332
333 def pathcopies(x, y, match=None):
333 def pathcopies(x, y, match=None):
334 """find {dst@y: src@x} copy mapping for directed compare"""
334 """find {dst@y: src@x} copy mapping for directed compare"""
335 repo = x._repo
335 repo = x._repo
336 debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
336 debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
337 if debug:
337 if debug:
338 repo.ui.debug('debug.copies: searching copies from %s to %s\n'
338 repo.ui.debug('debug.copies: searching copies from %s to %s\n'
339 % (x, y))
339 % (x, y))
340 if x == y or not x or not y:
340 if x == y or not x or not y:
341 return {}
341 return {}
342 a = y.ancestor(x)
342 a = y.ancestor(x)
343 if a == x:
343 if a == x:
344 if debug:
344 if debug:
345 repo.ui.debug('debug.copies: search mode: forward\n')
345 repo.ui.debug('debug.copies: search mode: forward\n')
346 return _forwardcopies(x, y, match=match)
346 return _forwardcopies(x, y, match=match)
347 if a == y:
347 if a == y:
348 if debug:
348 if debug:
349 repo.ui.debug('debug.copies: search mode: backward\n')
349 repo.ui.debug('debug.copies: search mode: backward\n')
350 return _backwardrenames(x, y, match=match)
350 return _backwardrenames(x, y, match=match)
351 if debug:
351 if debug:
352 repo.ui.debug('debug.copies: search mode: combined\n')
352 repo.ui.debug('debug.copies: search mode: combined\n')
353 return _chain(x, y, _backwardrenames(x, a, match=match),
353 return _chain(x, y, _backwardrenames(x, a, match=match),
354 _forwardcopies(a, y, match=match))
354 _forwardcopies(a, y, match=match))
355
355
356 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, debug=True):
356 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, debug=True):
357 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
357 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
358 and c2. This is its own function so extensions can easily wrap this call
358 and c2. This is its own function so extensions can easily wrap this call
359 to see what files mergecopies is about to process.
359 to see what files mergecopies is about to process.
360
360
361 Even though c1 and c2 are not used in this function, they are useful in
361 Even though c1 and c2 are not used in this function, they are useful in
362 other extensions for being able to read the file nodes of the changed files.
362 other extensions for being able to read the file nodes of the changed files.
363 """
363 """
364 u1 = sorted(addedinm1 - addedinm2)
364 u1 = sorted(addedinm1 - addedinm2)
365 u2 = sorted(addedinm2 - addedinm1)
365 u2 = sorted(addedinm2 - addedinm1)
366
366
367 if debug:
367 if debug:
368 header = " unmatched files in %s"
368 header = " unmatched files in %s"
369 if u1:
369 if u1:
370 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
370 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
371 if u2:
371 if u2:
372 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
372 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
373
373
374 return u1, u2
374 return u1, u2
375
375
376 def _makegetfctx(ctx):
376 def _makegetfctx(ctx):
377 """return a 'getfctx' function suitable for _checkcopies usage
377 """return a 'getfctx' function suitable for _checkcopies usage
378
378
379 We have to re-setup the function building 'filectx' for each
379 We have to re-setup the function building 'filectx' for each
380 '_checkcopies' to ensure the linkrev adjustment is properly setup for
380 '_checkcopies' to ensure the linkrev adjustment is properly setup for
381 each. Linkrev adjustment is important to avoid bug in rename
381 each. Linkrev adjustment is important to avoid bug in rename
382 detection. Moreover, having a proper '_ancestrycontext' setup ensures
382 detection. Moreover, having a proper '_ancestrycontext' setup ensures
383 the performance impact of this adjustment is kept limited. Without it,
383 the performance impact of this adjustment is kept limited. Without it,
384 each file could do a full dag traversal making the time complexity of
384 each file could do a full dag traversal making the time complexity of
385 the operation explode (see issue4537).
385 the operation explode (see issue4537).
386
386
387 This function exists here mostly to limit the impact on stable. Feel
387 This function exists here mostly to limit the impact on stable. Feel
388 free to refactor on default.
388 free to refactor on default.
389 """
389 """
390 rev = ctx.rev()
390 rev = ctx.rev()
391 repo = ctx._repo
391 repo = ctx._repo
392 ac = getattr(ctx, '_ancestrycontext', None)
392 ac = getattr(ctx, '_ancestrycontext', None)
393 if ac is None:
393 if ac is None:
394 revs = [rev]
394 revs = [rev]
395 if rev is None:
395 if rev is None:
396 revs = [p.rev() for p in ctx.parents()]
396 revs = [p.rev() for p in ctx.parents()]
397 ac = repo.changelog.ancestors(revs, inclusive=True)
397 ac = repo.changelog.ancestors(revs, inclusive=True)
398 ctx._ancestrycontext = ac
398 ctx._ancestrycontext = ac
399 def makectx(f, n):
399 def makectx(f, n):
400 if n in node.wdirfilenodeids: # in a working context?
400 if n in node.wdirfilenodeids: # in a working context?
401 if ctx.rev() is None:
401 if ctx.rev() is None:
402 return ctx.filectx(f)
402 return ctx.filectx(f)
403 return repo[None][f]
403 return repo[None][f]
404 fctx = repo.filectx(f, fileid=n)
404 fctx = repo.filectx(f, fileid=n)
405 # setup only needed for filectx not create from a changectx
405 # setup only needed for filectx not create from a changectx
406 fctx._ancestrycontext = ac
406 fctx._ancestrycontext = ac
407 fctx._descendantrev = rev
407 fctx._descendantrev = rev
408 return fctx
408 return fctx
409 return util.lrucachefunc(makectx)
409 return util.lrucachefunc(makectx)
410
410
411 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
411 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
412 """combine partial copy paths"""
412 """combine partial copy paths"""
413 remainder = {}
413 remainder = {}
414 for f in copyfrom:
414 for f in copyfrom:
415 if f in copyto:
415 if f in copyto:
416 finalcopy[copyto[f]] = copyfrom[f]
416 finalcopy[copyto[f]] = copyfrom[f]
417 del copyto[f]
417 del copyto[f]
418 for f in incompletediverge:
418 for f in incompletediverge:
419 assert f not in diverge
419 assert f not in diverge
420 ic = incompletediverge[f]
420 ic = incompletediverge[f]
421 if ic[0] in copyto:
421 if ic[0] in copyto:
422 diverge[f] = [copyto[ic[0]], ic[1]]
422 diverge[f] = [copyto[ic[0]], ic[1]]
423 else:
423 else:
424 remainder[f] = ic
424 remainder[f] = ic
425 return remainder
425 return remainder
426
426
427 def mergecopies(repo, c1, c2, base):
427 def mergecopies(repo, c1, c2, base):
428 """
428 """
429 Finds moves and copies between context c1 and c2 that are relevant for
429 Finds moves and copies between context c1 and c2 that are relevant for
430 merging. 'base' will be used as the merge base.
430 merging. 'base' will be used as the merge base.
431
431
432 Copytracing is used in commands like rebase, merge, unshelve, etc to merge
432 Copytracing is used in commands like rebase, merge, unshelve, etc to merge
433 files that were moved/ copied in one merge parent and modified in another.
433 files that were moved/ copied in one merge parent and modified in another.
434 For example:
434 For example:
435
435
436 o ---> 4 another commit
436 o ---> 4 another commit
437 |
437 |
438 | o ---> 3 commit that modifies a.txt
438 | o ---> 3 commit that modifies a.txt
439 | /
439 | /
440 o / ---> 2 commit that moves a.txt to b.txt
440 o / ---> 2 commit that moves a.txt to b.txt
441 |/
441 |/
442 o ---> 1 merge base
442 o ---> 1 merge base
443
443
444 If we try to rebase revision 3 on revision 4, since there is no a.txt in
444 If we try to rebase revision 3 on revision 4, since there is no a.txt in
445 revision 4, and if user have copytrace disabled, we prints the following
445 revision 4, and if user have copytrace disabled, we prints the following
446 message:
446 message:
447
447
448 ```other changed <file> which local deleted```
448 ```other changed <file> which local deleted```
449
449
450 Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
450 Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
451 "dirmove".
451 "dirmove".
452
452
453 "copy" is a mapping from destination name -> source name,
453 "copy" is a mapping from destination name -> source name,
454 where source is in c1 and destination is in c2 or vice-versa.
454 where source is in c1 and destination is in c2 or vice-versa.
455
455
456 "movewithdir" is a mapping from source name -> destination name,
456 "movewithdir" is a mapping from source name -> destination name,
457 where the file at source present in one context but not the other
457 where the file at source present in one context but not the other
458 needs to be moved to destination by the merge process, because the
458 needs to be moved to destination by the merge process, because the
459 other context moved the directory it is in.
459 other context moved the directory it is in.
460
460
461 "diverge" is a mapping of source name -> list of destination names
461 "diverge" is a mapping of source name -> list of destination names
462 for divergent renames.
462 for divergent renames.
463
463
464 "renamedelete" is a mapping of source name -> list of destination
464 "renamedelete" is a mapping of source name -> list of destination
465 names for files deleted in c1 that were renamed in c2 or vice-versa.
465 names for files deleted in c1 that were renamed in c2 or vice-versa.
466
466
467 "dirmove" is a mapping of detected source dir -> destination dir renames.
467 "dirmove" is a mapping of detected source dir -> destination dir renames.
468 This is needed for handling changes to new files previously grafted into
468 This is needed for handling changes to new files previously grafted into
469 renamed directories.
469 renamed directories.
470
470
471 This function calls different copytracing algorithms based on config.
471 This function calls different copytracing algorithms based on config.
472 """
472 """
473 # avoid silly behavior for update from empty dir
473 # avoid silly behavior for update from empty dir
474 if not c1 or not c2 or c1 == c2:
474 if not c1 or not c2 or c1 == c2:
475 return {}, {}, {}, {}, {}
475 return {}, {}, {}, {}, {}
476
476
477 narrowmatch = c1.repo().narrowmatch()
477 narrowmatch = c1.repo().narrowmatch()
478
478
479 # avoid silly behavior for parent -> working dir
479 # avoid silly behavior for parent -> working dir
480 if c2.node() is None and c1.node() == repo.dirstate.p1():
480 if c2.node() is None and c1.node() == repo.dirstate.p1():
481 return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
481 return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
482
482
483 copytracing = repo.ui.config('experimental', 'copytrace')
483 copytracing = repo.ui.config('experimental', 'copytrace')
484 boolctrace = stringutil.parsebool(copytracing)
484 boolctrace = stringutil.parsebool(copytracing)
485
485
486 # Copy trace disabling is explicitly below the node == p1 logic above
486 # Copy trace disabling is explicitly below the node == p1 logic above
487 # because the logic above is required for a simple copy to be kept across a
487 # because the logic above is required for a simple copy to be kept across a
488 # rebase.
488 # rebase.
489 if copytracing == 'heuristics':
489 if copytracing == 'heuristics':
490 # Do full copytracing if only non-public revisions are involved as
490 # Do full copytracing if only non-public revisions are involved as
491 # that will be fast enough and will also cover the copies which could
491 # that will be fast enough and will also cover the copies which could
492 # be missed by heuristics
492 # be missed by heuristics
493 if _isfullcopytraceable(repo, c1, base):
493 if _isfullcopytraceable(repo, c1, base):
494 return _fullcopytracing(repo, c1, c2, base)
494 return _fullcopytracing(repo, c1, c2, base)
495 return _heuristicscopytracing(repo, c1, c2, base)
495 return _heuristicscopytracing(repo, c1, c2, base)
496 elif boolctrace is False:
496 elif boolctrace is False:
497 # stringutil.parsebool() returns None when it is unable to parse the
497 # stringutil.parsebool() returns None when it is unable to parse the
498 # value, so we should rely on making sure copytracing is on such cases
498 # value, so we should rely on making sure copytracing is on such cases
499 return {}, {}, {}, {}, {}
499 return {}, {}, {}, {}, {}
500 else:
500 else:
501 return _fullcopytracing(repo, c1, c2, base)
501 return _fullcopytracing(repo, c1, c2, base)
502
502
503 def _isfullcopytraceable(repo, c1, base):
503 def _isfullcopytraceable(repo, c1, base):
504 """ Checks that if base, source and destination are all no-public branches,
504 """ Checks that if base, source and destination are all no-public branches,
505 if yes let's use the full copytrace algorithm for increased capabilities
505 if yes let's use the full copytrace algorithm for increased capabilities
506 since it will be fast enough.
506 since it will be fast enough.
507
507
508 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
508 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
509 number of changesets from c1 to base such that if number of changesets are
509 number of changesets from c1 to base such that if number of changesets are
510 more than the limit, full copytracing algorithm won't be used.
510 more than the limit, full copytracing algorithm won't be used.
511 """
511 """
512 if c1.rev() is None:
512 if c1.rev() is None:
513 c1 = c1.p1()
513 c1 = c1.p1()
514 if c1.mutable() and base.mutable():
514 if c1.mutable() and base.mutable():
515 sourcecommitlimit = repo.ui.configint('experimental',
515 sourcecommitlimit = repo.ui.configint('experimental',
516 'copytrace.sourcecommitlimit')
516 'copytrace.sourcecommitlimit')
517 commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
517 commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
518 return commits < sourcecommitlimit
518 return commits < sourcecommitlimit
519 return False
519 return False
520
520
def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
    the copies.

    Returns the five dicts "copy", "movewithdir", "diverge", "renamedelete"
    and "dirmove"; see mergecopies() for the meaning of each of them.
    """
    # In certain scenarios (e.g. graft, update or rebase), base can be
    # overridden We still need to know a real common ancestor in this case We
    # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
    # can be multiple common ancestors, e.g. in case of bidmerge.  Because our
    # caller may not know if the revision passed in lieu of the CA is a genuine
    # common ancestor or not without explicitly checking it, it's better to
    # determine that here.
    #
    # base.isancestorof(wc) is False, work around that
    _c1 = c1.p1() if c1.rev() is None else c1
    _c2 = c2.p1() if c2.rev() is None else c2
    # an endpoint is "dirty" if it isn't a descendant of the merge base
    # if we have a dirty endpoint, we need to trigger graft logic, and also
    # keep track of which endpoint is dirty
    dirtyc1 = not base.isancestorof(_c1)
    dirtyc2 = not base.isancestorof(_c2)
    graft = dirtyc1 or dirtyc2
    tca = base
    if graft:
        tca = _c1.ancestor(_c2)

    # earliest revision that may still be relevant (see _findlimit); this is
    # only an optimization hint passed down to _checkcopies()
    limit = _findlimit(repo, c1, c2)

    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    # gather data from _checkcopies:
    # - diverge = record all diverges in this dict
    # - copy = record all non-divergent copies in this dict
    # - fullcopy = record all copies in this dict
    # - incomplete = record non-divergent partial copies here
    # - incompletediverge = record divergent partial copies here
    diverge = {} # divergence data is shared
    incompletediverge  = {}
    data1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
            }
    data2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
            }

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
    bothnew = sorted(addedinm1 & addedinm2)
    if tca == base:
        # unmatched file from base
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
        u1u, u2u = u1r, u2r
    else:
        # unmatched file from base (DAG rotation in the graft case)
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
        # unmatched file from topological common ancestors (no DAG rotation)
        # need to recompute this for directory move handling when grafting
        mta = tca.manifest()
        u1u, u2u = _computenonoverlap(repo, c1, c2,
                                      m1.filesnotin(mta, repo.narrowmatch()),
                                      m2.filesnotin(mta, repo.narrowmatch()),
                                      debug=False)

    # look for copy sources of the files unmatched on each side
    for f in u1u:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)

    for f in u2u:
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)

    copy = dict(data1['copy'])
    copy.update(data2['copy'])
    fullcopy = dict(data1['fullcopy'])
    fullcopy.update(data2['fullcopy'])

    if dirtyc1:
        _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
                       incompletediverge)
    if dirtyc2:
        _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
                       incompletediverge)

    renamedelete = {}
    renamedeleteset = set()
    divergeset = set()
    for of, fl in list(diverge.items()):
        if len(fl) == 1 or of in c1 or of in c2:
            del diverge[of] # not actually divergent, or not a rename
            if of not in c1 and of not in c2:
                # renamed on one side, deleted on the other side, but filter
                # out files that have been renamed and then deleted
                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
                renamedeleteset.update(fl) # reverse map for below
        else:
            divergeset.update(fl) # reverse map for below

    # second pass: files added on *both* sides can also hide copies
    bothdiverge = {}
    bothincompletediverge = {}
    remainder = {}
    both1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
            }
    both2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
            }
    for f in bothnew:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
    if dirtyc1 and dirtyc2:
        remainder = _combinecopies(both2['incomplete'], both1['incomplete'],
                                   copy, bothdiverge, bothincompletediverge)
        remainder1 = _combinecopies(both1['incomplete'], both2['incomplete'],
                                    copy, bothdiverge, bothincompletediverge)
        remainder.update(remainder1)
    elif dirtyc1:
        # incomplete copies may only be found on the "dirty" side for bothnew
        assert not both2['incomplete']
        remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    elif dirtyc2:
        assert not both1['incomplete']
        remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    else:
        # incomplete copies and divergences can't happen outside grafts
        assert not both1['incomplete']
        assert not both2['incomplete']
        assert not bothincompletediverge
    for f in remainder:
        assert f not in bothdiverge
        ic = remainder[f]
        if ic[0] in (m1 if dirtyc1 else m2):
            # backed-out rename on one side, but watch out for deleted files
            bothdiverge[f] = ic
    for of, fl in bothdiverge.items():
        if len(fl) == 2 and fl[0] == fl[1]:
            copy[fl[0]] = of # not actually divergent, just matching renames

    # Sometimes we get invalid copies here (the "and not remotebase" in
    # _checkcopies() seems suspicious). Filter them out.
    for dst, src in fullcopy.copy().items():
        if src not in mb:
            del fullcopy[dst]
    # Sometimes we forget to add entries from "copy" to "fullcopy", so fix
    # that up here
    for dst, src in copy.items():
        fullcopy[dst] = src
    # Sometimes we forget to add entries from "diverge" to "fullcopy", so fix
    # that up here
    for src, dsts in diverge.items():
        for dst in dsts:
            fullcopy[dst] = src

    if not fullcopy:
        return copy, {}, diverge, renamedelete, {}

    if repo.ui.debugflag:
        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in divergeset:
                note += "!"
            if f in renamedeleteset:
                note += "%"
            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                              note))
    del divergeset

    repo.ui.debug("  checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    # Hack for adding '', which is not otherwise added, to d1 and d2
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc] = ddst

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, {}, diverge, renamedelete, {}

    # normalize to trailing-slash form so prefix matching below works
    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}

    for d in dirmove:
        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in u1r + u2r:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(("   pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete, dirmove
768
768
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If merge is involved it fallbacks to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    In some cases the copy/move candidates found by heuristics can be very large
    in number and that will make the algorithm slow. The number of possible
    candidates to check can be limited by using the config
    `experimental.copytrace.movecandidateslimit` which defaults to 100.

    Returns the same 5-tuple as mergecopies(); only the first element
    (the "copy" dict) is ever populated by the heuristic path.
    """

    # working contexts have no rev; substitute their first parent
    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs('%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug("switching to full copytracing as base is not "
                      "an ancestor of c2\n")
        return _fullcopytracing(repo, c1, c2, base)

    # walk c2's first-parent chain down to base, collecting changed files
    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug("switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    # copies already recorded between base and c2; keep only those whose
    # source still exists on the c1 side
    cp = _forwardcopies(base, c2)
    for dst, src in cp.iteritems():
        if src in m1:
            copies[dst] = src

    # file is missing if it isn't present in the destination, but is present in
    # the base and present in the source.
    # Presence in the base is important to exclude added files, presence in the
    # source is important to exclude removed files.
    filt = lambda f: f not in m1 and f in base and f in c2
    missingfiles = [f for f in changedfiles if filt(f)]

    if missingfiles:
        # index files added on the c1 side by basename and by dirname; these
        # are the two rename shapes this heuristic recognizes (see docstring)
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            # we can have a lot of candidates which can slow down the heuristics
            # config value to limit the number of candidates moves to check
            maxcandidates = repo.ui.configint('experimental',
                                              'copytrace.movecandidateslimit')

            if len(movecandidates) > maxcandidates:
                repo.ui.status(_("skipping copytracing for '%s', more "
                                 "candidates than the limit: %d\n")
                               % (f, len(movecandidates)))
                continue

            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return copies, {}, {}, {}, {}
869
869
870 def _related(f1, f2):
870 def _related(f1, f2):
871 """return True if f1 and f2 filectx have a common ancestor
871 """return True if f1 and f2 filectx have a common ancestor
872
872
873 Walk back to common ancestor to see if the two files originate
873 Walk back to common ancestor to see if the two files originate
874 from the same file. Since workingfilectx's rev() is None it messes
874 from the same file. Since workingfilectx's rev() is None it messes
875 up the integer comparison logic, hence the pre-step check for
875 up the integer comparison logic, hence the pre-step check for
876 None (f1 and f2 can only be workingfilectx's initially).
876 None (f1 and f2 can only be workingfilectx's initially).
877 """
877 """
878
878
879 if f1 == f2:
879 if f1 == f2:
880 return True # a match
880 return True # a match
881
881
882 g1, g2 = f1.ancestors(), f2.ancestors()
882 g1, g2 = f1.ancestors(), f2.ancestors()
883 try:
883 try:
884 f1r, f2r = f1.linkrev(), f2.linkrev()
884 f1r, f2r = f1.linkrev(), f2.linkrev()
885
885
886 if f1r is None:
886 if f1r is None:
887 f1 = next(g1)
887 f1 = next(g1)
888 if f2r is None:
888 if f2r is None:
889 f2 = next(g2)
889 f2 = next(g2)
890
890
891 while True:
891 while True:
892 f1r, f2r = f1.linkrev(), f2.linkrev()
892 f1r, f2r = f1.linkrev(), f2.linkrev()
893 if f1r > f2r:
893 if f1r > f2r:
894 f1 = next(g1)
894 f1 = next(g1)
895 elif f2r > f1r:
895 elif f2r > f1r:
896 f2 = next(g2)
896 f2 = next(g2)
897 else: # f1 and f2 point to files in the same linkrev
897 else: # f1 and f2 point to files in the same linkrev
898 return f1 == f2 # true if they point to the same file
898 return f1 == f2 # true if they point to the same file
899 except StopIteration:
899 except StopIteration:
900 return False
900 return False
901
901
def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
    """
    check possible copies of f from msrc to mdst

    srcctx = starting context for f in msrc
    dstctx = destination context for f in mdst
    f = the filename to check (as in msrc)
    base = the changectx used as a merge base
    tca = topological common ancestor for graft-like scenarios
    remotebase = True if base is outside tca::srcctx, False otherwise
    limit = the rev number to not search beyond
    data = dictionary of dictionary to store copy data. (see mergecopies)

    note: limit is only an optimization, and provides no guarantee that
    irrelevant revisions will not be visited
    there is no easy way to make this algorithm stop in a guaranteed way
    once it "goes behind a certain revision".
    """

    # Manifests of the endpoints involved in the copy search.
    msrc = srcctx.manifest()
    mdst = dstctx.manifest()
    mb = base.manifest()
    mta = tca.manifest()
    # Might be true if this call is about finding backward renames,
    # This happens in the case of grafts because the DAG is then rotated.
    # If the file exists in both the base and the source, we are not looking
    # for a rename on the source side, but on the part of the DAG that is
    # traversed backwards.
    #
    # In the case there is both backward and forward renames (before and after
    # the base) this is more complicated as we must detect a divergence.
    # We use 'backwards = False' in that case.
    backwards = not remotebase and base != tca and f in mb
    getsrcfctx = _makegetfctx(srcctx)
    getdstfctx = _makegetfctx(dstctx)

    if msrc[f] == mb.get(f) and not remotebase:
        # Nothing to merge
        return

    of = None
    # Track every path f has been known by along the rename chain, so the
    # walk terminates when a path repeats (copy cycles / ping-pong renames).
    seen = {f}
    for oc in getsrcfctx(f, msrc[f]).ancestors():
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if oc.linkrev() < limit:
                break
            continue
        seen.add(of)

        # remember for dir rename detection
        if backwards:
            data['fullcopy'][of] = f # grafting backwards through renames
        else:
            data['fullcopy'][f] = of
        if of not in mdst:
            continue # no match, keep looking
        if mdst[of] == mb.get(of):
            return # no merge needed, quit early
        c2 = getdstfctx(of, mdst[of])
        # c2 might be a plain new file on added on destination side that is
        # unrelated to the droids we are looking for.
        cr = _related(oc, c2)
        if cr and (of == f or of == c2.path()): # non-divergent
            if backwards:
                data['copy'][of] = f
            elif of in mb:
                data['copy'][f] = of
            elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
                data['copy'][of] = f
                # Reorient the dir-rename record to match the backward copy.
                del data['fullcopy'][f]
                data['fullcopy'][of] = f
            else: # divergence w.r.t. graft CA on one side of topological CA
                for sf in seen:
                    if sf in mb:
                        assert sf not in data['diverge']
                        data['diverge'][sf] = [f, of]
                        break
            return

        if of in mta:
            if backwards or remotebase:
                # Rename found only on the rotated/remote side; record it as
                # incomplete so the caller can combine it with the other side.
                data['incomplete'][of] = f
            else:
                for sf in seen:
                    if sf in mb:
                        if tca == base:
                            data['diverge'].setdefault(sf, []).append(f)
                        else:
                            data['incompletediverge'][sf] = [of, f]
                        return
994
994
def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
    """reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    """
    copytracesetting = repo.ui.config('experimental', 'copytrace')
    parsedsetting = stringutil.parsebool(copytracesetting)
    # Copy tracing is considered active unless it was explicitly disabled;
    # 'heuristics' is a valid non-boolean value, and an unparseable/unset
    # value (parsedsetting is None) also keeps tracing on.
    tracingactive = (copytracesetting == 'heuristics'
                     or parsedsetting
                     or parsedsetting is None)
    exclude = {}
    if skiprev is not None and tracingactive:
        # copytrace='off' skips this line, but not the entire function because
        # the line below is O(size of the repo) during a rebase, while the rest
        # of the function is much faster (and is required for carrying copy
        # metadata across the rebase anyway).
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate
        if dst not in exclude:
            wctx[dst].markcopied(src)
General Comments 0
You need to be logged in to leave comments. Login now