##// END OF EJS Templates
copies: introduce getdstfctx...
Stanislau Hlebik -
r32565:5313d980 default
parent child Browse files
Show More
@@ -1,716 +1,717 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 node,
13 node,
14 pathutil,
14 pathutil,
15 scmutil,
15 scmutil,
16 util,
16 util,
17 )
17 )
18
18
def _findlimit(repo, a, b):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendent
    of the other, in which case we can return the minimum revnum of a and b.
    None if no such revision exists.

    'a' and 'b' are revision numbers; None stands for the working directory
    (mapped to the pseudo rev len(changelog) below).
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    working = len(cl) # pseudo rev for the working directory
    if a is None:
        a = working
    if b is None:
        b = working

    side = {a: -1, b: 1}
    # revs are pushed negated so the min-heap pops the highest rev first
    # (topological order, newest to oldest)
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = working

    while interesting:
        r = -heapq.heappop(visit)
        if r == working:
            # pseudo rev: read parents from the dirstate, not the changelog
            parents = [cl.rev(p) for p in repo.dirstate.parents()]
        else:
            parents = cl.parentrevs(r)
        for p in parents:
            if p < 0:
                continue
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    if not hascommonancestor:
        return None

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @ 3 temporary amend commit for a1-amend
    # |
    # o 2 a1-amend
    # |
    # | o 1 a1
    # |/
    # o 0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendent of b or visa-versa.
    return min(limit, a, b)
103
103
104 def _chain(src, dst, a, b):
104 def _chain(src, dst, a, b):
105 '''chain two sets of copies a->b'''
105 '''chain two sets of copies a->b'''
106 t = a.copy()
106 t = a.copy()
107 for k, v in b.iteritems():
107 for k, v in b.iteritems():
108 if v in t:
108 if v in t:
109 # found a chain
109 # found a chain
110 if t[v] != k:
110 if t[v] != k:
111 # file wasn't renamed back to itself
111 # file wasn't renamed back to itself
112 t[k] = t[v]
112 t[k] = t[v]
113 if v not in dst:
113 if v not in dst:
114 # chain was a rename, not a copy
114 # chain was a rename, not a copy
115 del t[v]
115 del t[v]
116 if v in src:
116 if v in src:
117 # file is a copy of an existing file
117 # file is a copy of an existing file
118 t[k] = v
118 t[k] = v
119
119
120 # remove criss-crossed copies
120 # remove criss-crossed copies
121 for k, v in t.items():
121 for k, v in t.items():
122 if k in src and v in dst:
122 if k in src and v in dst:
123 del t[k]
123 del t[k]
124
124
125 return t
125 return t
126
126
127 def _tracefile(fctx, am, limit=-1):
127 def _tracefile(fctx, am, limit=-1):
128 '''return file context that is the ancestor of fctx present in ancestor
128 '''return file context that is the ancestor of fctx present in ancestor
129 manifest am, stopping after the first ancestor lower than limit'''
129 manifest am, stopping after the first ancestor lower than limit'''
130
130
131 for f in fctx.ancestors():
131 for f in fctx.ancestors():
132 if am.get(f.path(), None) == f.filenode():
132 if am.get(f.path(), None) == f.filenode():
133 return f
133 return f
134 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
134 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
135 return None
135 return None
136
136
137 def _dirstatecopies(d):
137 def _dirstatecopies(d):
138 ds = d._repo.dirstate
138 ds = d._repo.dirstate
139 c = ds.copies().copy()
139 c = ds.copies().copy()
140 for k in c.keys():
140 for k in c.keys():
141 if ds[k] not in 'anm':
141 if ds[k] not in 'anm':
142 del c[k]
142 del c[k]
143 return c
143 return c
144
144
145 def _computeforwardmissing(a, b, match=None):
145 def _computeforwardmissing(a, b, match=None):
146 """Computes which files are in b but not a.
146 """Computes which files are in b but not a.
147 This is its own function so extensions can easily wrap this call to see what
147 This is its own function so extensions can easily wrap this call to see what
148 files _forwardcopies is about to process.
148 files _forwardcopies is about to process.
149 """
149 """
150 ma = a.manifest()
150 ma = a.manifest()
151 mb = b.manifest()
151 mb = b.manifest()
152 return mb.filesnotin(ma, match=match)
152 return mb.filesnotin(ma, match=match)
153
153
def _forwardcopies(a, b, match=None):
    '''find {dst@b: src@a} copy mapping where a is an ancestor of b

    'match' optionally restricts which files are considered.
    '''

    # check for working copy
    w = None
    if b.rev() is None:
        w = b
        b = w.p1()
        if a == b:
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(w)

    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    limit = _findlimit(a._repo, a.rev(), b.rev())
    if limit is None:
        limit = -1  # no common ancestor: disable the cutoff in _tracefile
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests, since
    # it compares the entire manifests. We can optimize it in the common use
    # case of computing what copies are in a commit versus its parent (like
    # during a rebase or histedit). Note, we exclude merge commits from this
    # optimization, since the ctx.files() for a merge commit is not correct for
    # this comparison.
    forwardmissingmatch = match
    if not match and b.p1() == a and b.p2().node() == node.nullid:
        forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    # share one ancestry context across all file contexts so linkrev
    # adjustment does not redo a full dag traversal per file (see the
    # _makegetfctx docstring / issue4537 for the rationale)
    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
    for f in missing:
        fctx = b[f]
        fctx._ancestrycontext = ancestrycontext
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            cm[f] = ofctx.path()

    # combine copies from dirstate if necessary
    if w is not None:
        cm = _chain(a, w, cm, _dirstatecopies(w))

    return cm
202
202
203 def _backwardrenames(a, b):
203 def _backwardrenames(a, b):
204 if a._repo.ui.configbool('experimental', 'disablecopytrace'):
204 if a._repo.ui.configbool('experimental', 'disablecopytrace'):
205 return {}
205 return {}
206
206
207 # Even though we're not taking copies into account, 1:n rename situations
207 # Even though we're not taking copies into account, 1:n rename situations
208 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
208 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
209 # arbitrarily pick one of the renames.
209 # arbitrarily pick one of the renames.
210 f = _forwardcopies(b, a)
210 f = _forwardcopies(b, a)
211 r = {}
211 r = {}
212 for k, v in sorted(f.iteritems()):
212 for k, v in sorted(f.iteritems()):
213 # remove copies
213 # remove copies
214 if v in a:
214 if v in a:
215 continue
215 continue
216 r[v] = k
216 r[v] = k
217 return r
217 return r
218
218
def pathcopies(x, y, match=None):
    '''find {dst@y: src@x} copy mapping for directed compare'''
    # degenerate comparisons carry no copy information
    if x == y or not x or not y:
        return {}
    anc = y.ancestor(x)
    if anc == x:
        # x is an ancestor of y: trace forward only
        return _forwardcopies(x, y, match=match)
    if anc == y:
        # y is an ancestor of x: trace backward only
        return _backwardrenames(x, y)
    # otherwise trace backward from x to the common ancestor, forward from
    # there to y, and chain the two partial mappings together
    backward = _backwardrenames(x, anc)
    forward = _forwardcopies(anc, y, match=match)
    return _chain(x, y, backward, forward)
230
230
231 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
231 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
232 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
232 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
233 and c2. This is its own function so extensions can easily wrap this call
233 and c2. This is its own function so extensions can easily wrap this call
234 to see what files mergecopies is about to process.
234 to see what files mergecopies is about to process.
235
235
236 Even though c1 and c2 are not used in this function, they are useful in
236 Even though c1 and c2 are not used in this function, they are useful in
237 other extensions for being able to read the file nodes of the changed files.
237 other extensions for being able to read the file nodes of the changed files.
238
238
239 "baselabel" can be passed to help distinguish the multiple computations
239 "baselabel" can be passed to help distinguish the multiple computations
240 done in the graft case.
240 done in the graft case.
241 """
241 """
242 u1 = sorted(addedinm1 - addedinm2)
242 u1 = sorted(addedinm1 - addedinm2)
243 u2 = sorted(addedinm2 - addedinm1)
243 u2 = sorted(addedinm2 - addedinm1)
244
244
245 header = " unmatched files in %s"
245 header = " unmatched files in %s"
246 if baselabel:
246 if baselabel:
247 header += ' (from %s)' % baselabel
247 header += ' (from %s)' % baselabel
248 if u1:
248 if u1:
249 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
249 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
250 if u2:
250 if u2:
251 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
251 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
252 return u1, u2
252 return u1, u2
253
253
def _makegetfctx(ctx):
    """return a 'getfctx' function suitable for _checkcopies usage

    We have to re-setup the function building 'filectx' for each
    '_checkcopies' to ensure the linkrev adjustment is properly setup for
    each. Linkrev adjustment is important to avoid bug in rename
    detection. Moreover, having a proper '_ancestrycontext' setup ensures
    the performance impact of this adjustment is kept limited. Without it,
    each file could do a full dag traversal making the time complexity of
    the operation explode (see issue4537).

    This function exists here mostly to limit the impact on stable. Feel
    free to refactor on default.
    """
    rev = ctx.rev()
    repo = ctx._repo
    ac = getattr(ctx, '_ancestrycontext', None)
    if ac is None:
        # compute the ancestry context once and cache it on the changectx;
        # for the working directory (rev is None) use the parents' revs
        revs = [rev]
        if rev is None:
            revs = [p.rev() for p in ctx.parents()]
        ac = repo.changelog.ancestors(revs, inclusive=True)
        ctx._ancestrycontext = ac
    def makectx(f, n):
        if n in node.wdirnodes: # in a working context?
            if ctx.rev() is None:
                return ctx.filectx(f)
            return repo[None][f]
        fctx = repo.filectx(f, fileid=n)
        # setup only needed for filectx not create from a changectx
        fctx._ancestrycontext = ac
        fctx._descendantrev = rev
        return fctx
    # memoize by (f, n) so repeated lookups reuse the same filectx
    return util.lrucachefunc(makectx)
288
288
289 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
289 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
290 """combine partial copy paths"""
290 """combine partial copy paths"""
291 remainder = {}
291 remainder = {}
292 for f in copyfrom:
292 for f in copyfrom:
293 if f in copyto:
293 if f in copyto:
294 finalcopy[copyto[f]] = copyfrom[f]
294 finalcopy[copyto[f]] = copyfrom[f]
295 del copyto[f]
295 del copyto[f]
296 for f in incompletediverge:
296 for f in incompletediverge:
297 assert f not in diverge
297 assert f not in diverge
298 ic = incompletediverge[f]
298 ic = incompletediverge[f]
299 if ic[0] in copyto:
299 if ic[0] in copyto:
300 diverge[f] = [copyto[ic[0]], ic[1]]
300 diverge[f] = [copyto[ic[0]], ic[1]]
301 else:
301 else:
302 remainder[f] = ic
302 remainder[f] = ic
303 return remainder
303 return remainder
304
304
305 def mergecopies(repo, c1, c2, base):
305 def mergecopies(repo, c1, c2, base):
306 """
306 """
307 Find moves and copies between context c1 and c2 that are relevant
307 Find moves and copies between context c1 and c2 that are relevant
308 for merging. 'base' will be used as the merge base.
308 for merging. 'base' will be used as the merge base.
309
309
310 Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
310 Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
311 "dirmove".
311 "dirmove".
312
312
313 "copy" is a mapping from destination name -> source name,
313 "copy" is a mapping from destination name -> source name,
314 where source is in c1 and destination is in c2 or vice-versa.
314 where source is in c1 and destination is in c2 or vice-versa.
315
315
316 "movewithdir" is a mapping from source name -> destination name,
316 "movewithdir" is a mapping from source name -> destination name,
317 where the file at source present in one context but not the other
317 where the file at source present in one context but not the other
318 needs to be moved to destination by the merge process, because the
318 needs to be moved to destination by the merge process, because the
319 other context moved the directory it is in.
319 other context moved the directory it is in.
320
320
321 "diverge" is a mapping of source name -> list of destination names
321 "diverge" is a mapping of source name -> list of destination names
322 for divergent renames.
322 for divergent renames.
323
323
324 "renamedelete" is a mapping of source name -> list of destination
324 "renamedelete" is a mapping of source name -> list of destination
325 names for files deleted in c1 that were renamed in c2 or vice-versa.
325 names for files deleted in c1 that were renamed in c2 or vice-versa.
326
326
327 "dirmove" is a mapping of detected source dir -> destination dir renames.
327 "dirmove" is a mapping of detected source dir -> destination dir renames.
328 This is needed for handling changes to new files previously grafted into
328 This is needed for handling changes to new files previously grafted into
329 renamed directories.
329 renamed directories.
330 """
330 """
331 # avoid silly behavior for update from empty dir
331 # avoid silly behavior for update from empty dir
332 if not c1 or not c2 or c1 == c2:
332 if not c1 or not c2 or c1 == c2:
333 return {}, {}, {}, {}, {}
333 return {}, {}, {}, {}, {}
334
334
335 # avoid silly behavior for parent -> working dir
335 # avoid silly behavior for parent -> working dir
336 if c2.node() is None and c1.node() == repo.dirstate.p1():
336 if c2.node() is None and c1.node() == repo.dirstate.p1():
337 return repo.dirstate.copies(), {}, {}, {}, {}
337 return repo.dirstate.copies(), {}, {}, {}, {}
338
338
339 # Copy trace disabling is explicitly below the node == p1 logic above
339 # Copy trace disabling is explicitly below the node == p1 logic above
340 # because the logic above is required for a simple copy to be kept across a
340 # because the logic above is required for a simple copy to be kept across a
341 # rebase.
341 # rebase.
342 if repo.ui.configbool('experimental', 'disablecopytrace'):
342 if repo.ui.configbool('experimental', 'disablecopytrace'):
343 return {}, {}, {}, {}, {}
343 return {}, {}, {}, {}, {}
344
344
345 # In certain scenarios (e.g. graft, update or rebase), base can be
345 # In certain scenarios (e.g. graft, update or rebase), base can be
346 # overridden We still need to know a real common ancestor in this case We
346 # overridden We still need to know a real common ancestor in this case We
347 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
347 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
348 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
348 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
349 # caller may not know if the revision passed in lieu of the CA is a genuine
349 # caller may not know if the revision passed in lieu of the CA is a genuine
350 # common ancestor or not without explicitly checking it, it's better to
350 # common ancestor or not without explicitly checking it, it's better to
351 # determine that here.
351 # determine that here.
352 #
352 #
353 # base.descendant(wc) and base.descendant(base) are False, work around that
353 # base.descendant(wc) and base.descendant(base) are False, work around that
354 _c1 = c1.p1() if c1.rev() is None else c1
354 _c1 = c1.p1() if c1.rev() is None else c1
355 _c2 = c2.p1() if c2.rev() is None else c2
355 _c2 = c2.p1() if c2.rev() is None else c2
356 # an endpoint is "dirty" if it isn't a descendant of the merge base
356 # an endpoint is "dirty" if it isn't a descendant of the merge base
357 # if we have a dirty endpoint, we need to trigger graft logic, and also
357 # if we have a dirty endpoint, we need to trigger graft logic, and also
358 # keep track of which endpoint is dirty
358 # keep track of which endpoint is dirty
359 dirtyc1 = not (base == _c1 or base.descendant(_c1))
359 dirtyc1 = not (base == _c1 or base.descendant(_c1))
360 dirtyc2 = not (base== _c2 or base.descendant(_c2))
360 dirtyc2 = not (base== _c2 or base.descendant(_c2))
361 graft = dirtyc1 or dirtyc2
361 graft = dirtyc1 or dirtyc2
362 tca = base
362 tca = base
363 if graft:
363 if graft:
364 tca = _c1.ancestor(_c2)
364 tca = _c1.ancestor(_c2)
365
365
366 limit = _findlimit(repo, c1.rev(), c2.rev())
366 limit = _findlimit(repo, c1.rev(), c2.rev())
367 if limit is None:
367 if limit is None:
368 # no common ancestor, no copies
368 # no common ancestor, no copies
369 return {}, {}, {}, {}, {}
369 return {}, {}, {}, {}, {}
370 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
370 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
371
371
372 m1 = c1.manifest()
372 m1 = c1.manifest()
373 m2 = c2.manifest()
373 m2 = c2.manifest()
374 mb = base.manifest()
374 mb = base.manifest()
375
375
376 # gather data from _checkcopies:
376 # gather data from _checkcopies:
377 # - diverge = record all diverges in this dict
377 # - diverge = record all diverges in this dict
378 # - copy = record all non-divergent copies in this dict
378 # - copy = record all non-divergent copies in this dict
379 # - fullcopy = record all copies in this dict
379 # - fullcopy = record all copies in this dict
380 # - incomplete = record non-divergent partial copies here
380 # - incomplete = record non-divergent partial copies here
381 # - incompletediverge = record divergent partial copies here
381 # - incompletediverge = record divergent partial copies here
382 diverge = {} # divergence data is shared
382 diverge = {} # divergence data is shared
383 incompletediverge = {}
383 incompletediverge = {}
384 data1 = {'copy': {},
384 data1 = {'copy': {},
385 'fullcopy': {},
385 'fullcopy': {},
386 'incomplete': {},
386 'incomplete': {},
387 'diverge': diverge,
387 'diverge': diverge,
388 'incompletediverge': incompletediverge,
388 'incompletediverge': incompletediverge,
389 }
389 }
390 data2 = {'copy': {},
390 data2 = {'copy': {},
391 'fullcopy': {},
391 'fullcopy': {},
392 'incomplete': {},
392 'incomplete': {},
393 'diverge': diverge,
393 'diverge': diverge,
394 'incompletediverge': incompletediverge,
394 'incompletediverge': incompletediverge,
395 }
395 }
396
396
397 # find interesting file sets from manifests
397 # find interesting file sets from manifests
398 addedinm1 = m1.filesnotin(mb)
398 addedinm1 = m1.filesnotin(mb)
399 addedinm2 = m2.filesnotin(mb)
399 addedinm2 = m2.filesnotin(mb)
400 bothnew = sorted(addedinm1 & addedinm2)
400 bothnew = sorted(addedinm1 & addedinm2)
401 if tca == base:
401 if tca == base:
402 # unmatched file from base
402 # unmatched file from base
403 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
403 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
404 u1u, u2u = u1r, u2r
404 u1u, u2u = u1r, u2r
405 else:
405 else:
406 # unmatched file from base (DAG rotation in the graft case)
406 # unmatched file from base (DAG rotation in the graft case)
407 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
407 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
408 baselabel='base')
408 baselabel='base')
409 # unmatched file from topological common ancestors (no DAG rotation)
409 # unmatched file from topological common ancestors (no DAG rotation)
410 # need to recompute this for directory move handling when grafting
410 # need to recompute this for directory move handling when grafting
411 mta = tca.manifest()
411 mta = tca.manifest()
412 u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
412 u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
413 m2.filesnotin(mta),
413 m2.filesnotin(mta),
414 baselabel='topological common ancestor')
414 baselabel='topological common ancestor')
415
415
416 for f in u1u:
416 for f in u1u:
417 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
417 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
418
418
419 for f in u2u:
419 for f in u2u:
420 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
420 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
421
421
422 copy = dict(data1['copy'].items() + data2['copy'].items())
422 copy = dict(data1['copy'].items() + data2['copy'].items())
423 fullcopy = dict(data1['fullcopy'].items() + data2['fullcopy'].items())
423 fullcopy = dict(data1['fullcopy'].items() + data2['fullcopy'].items())
424
424
425 if dirtyc1:
425 if dirtyc1:
426 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
426 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
427 incompletediverge)
427 incompletediverge)
428 else:
428 else:
429 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
429 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
430 incompletediverge)
430 incompletediverge)
431
431
432 renamedelete = {}
432 renamedelete = {}
433 renamedeleteset = set()
433 renamedeleteset = set()
434 divergeset = set()
434 divergeset = set()
435 for of, fl in diverge.items():
435 for of, fl in diverge.items():
436 if len(fl) == 1 or of in c1 or of in c2:
436 if len(fl) == 1 or of in c1 or of in c2:
437 del diverge[of] # not actually divergent, or not a rename
437 del diverge[of] # not actually divergent, or not a rename
438 if of not in c1 and of not in c2:
438 if of not in c1 and of not in c2:
439 # renamed on one side, deleted on the other side, but filter
439 # renamed on one side, deleted on the other side, but filter
440 # out files that have been renamed and then deleted
440 # out files that have been renamed and then deleted
441 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
441 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
442 renamedeleteset.update(fl) # reverse map for below
442 renamedeleteset.update(fl) # reverse map for below
443 else:
443 else:
444 divergeset.update(fl) # reverse map for below
444 divergeset.update(fl) # reverse map for below
445
445
446 if bothnew:
446 if bothnew:
447 repo.ui.debug(" unmatched files new in both:\n %s\n"
447 repo.ui.debug(" unmatched files new in both:\n %s\n"
448 % "\n ".join(bothnew))
448 % "\n ".join(bothnew))
449 bothdiverge = {}
449 bothdiverge = {}
450 bothincompletediverge = {}
450 bothincompletediverge = {}
451 remainder = {}
451 remainder = {}
452 both1 = {'copy': {},
452 both1 = {'copy': {},
453 'fullcopy': {},
453 'fullcopy': {},
454 'incomplete': {},
454 'incomplete': {},
455 'diverge': bothdiverge,
455 'diverge': bothdiverge,
456 'incompletediverge': bothincompletediverge
456 'incompletediverge': bothincompletediverge
457 }
457 }
458 both2 = {'copy': {},
458 both2 = {'copy': {},
459 'fullcopy': {},
459 'fullcopy': {},
460 'incomplete': {},
460 'incomplete': {},
461 'diverge': bothdiverge,
461 'diverge': bothdiverge,
462 'incompletediverge': bothincompletediverge
462 'incompletediverge': bothincompletediverge
463 }
463 }
464 for f in bothnew:
464 for f in bothnew:
465 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
465 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
466 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
466 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
467 if dirtyc1:
467 if dirtyc1:
468 # incomplete copies may only be found on the "dirty" side for bothnew
468 # incomplete copies may only be found on the "dirty" side for bothnew
469 assert not both2['incomplete']
469 assert not both2['incomplete']
470 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
470 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
471 bothincompletediverge)
471 bothincompletediverge)
472 elif dirtyc2:
472 elif dirtyc2:
473 assert not both1['incomplete']
473 assert not both1['incomplete']
474 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
474 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
475 bothincompletediverge)
475 bothincompletediverge)
476 else:
476 else:
477 # incomplete copies and divergences can't happen outside grafts
477 # incomplete copies and divergences can't happen outside grafts
478 assert not both1['incomplete']
478 assert not both1['incomplete']
479 assert not both2['incomplete']
479 assert not both2['incomplete']
480 assert not bothincompletediverge
480 assert not bothincompletediverge
481 for f in remainder:
481 for f in remainder:
482 assert f not in bothdiverge
482 assert f not in bothdiverge
483 ic = remainder[f]
483 ic = remainder[f]
484 if ic[0] in (m1 if dirtyc1 else m2):
484 if ic[0] in (m1 if dirtyc1 else m2):
485 # backed-out rename on one side, but watch out for deleted files
485 # backed-out rename on one side, but watch out for deleted files
486 bothdiverge[f] = ic
486 bothdiverge[f] = ic
487 for of, fl in bothdiverge.items():
487 for of, fl in bothdiverge.items():
488 if len(fl) == 2 and fl[0] == fl[1]:
488 if len(fl) == 2 and fl[0] == fl[1]:
489 copy[fl[0]] = of # not actually divergent, just matching renames
489 copy[fl[0]] = of # not actually divergent, just matching renames
490
490
491 if fullcopy and repo.ui.debugflag:
491 if fullcopy and repo.ui.debugflag:
492 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
492 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
493 "% = renamed and deleted):\n")
493 "% = renamed and deleted):\n")
494 for f in sorted(fullcopy):
494 for f in sorted(fullcopy):
495 note = ""
495 note = ""
496 if f in copy:
496 if f in copy:
497 note += "*"
497 note += "*"
498 if f in divergeset:
498 if f in divergeset:
499 note += "!"
499 note += "!"
500 if f in renamedeleteset:
500 if f in renamedeleteset:
501 note += "%"
501 note += "%"
502 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
502 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
503 note))
503 note))
504 del divergeset
504 del divergeset
505
505
506 if not fullcopy:
506 if not fullcopy:
507 return copy, {}, diverge, renamedelete, {}
507 return copy, {}, diverge, renamedelete, {}
508
508
509 repo.ui.debug(" checking for directory renames\n")
509 repo.ui.debug(" checking for directory renames\n")
510
510
511 # generate a directory move map
511 # generate a directory move map
512 d1, d2 = c1.dirs(), c2.dirs()
512 d1, d2 = c1.dirs(), c2.dirs()
513 # Hack for adding '', which is not otherwise added, to d1 and d2
513 # Hack for adding '', which is not otherwise added, to d1 and d2
514 d1.addpath('/')
514 d1.addpath('/')
515 d2.addpath('/')
515 d2.addpath('/')
516 invalid = set()
516 invalid = set()
517 dirmove = {}
517 dirmove = {}
518
518
519 # examine each file copy for a potential directory move, which is
519 # examine each file copy for a potential directory move, which is
520 # when all the files in a directory are moved to a new directory
520 # when all the files in a directory are moved to a new directory
521 for dst, src in fullcopy.iteritems():
521 for dst, src in fullcopy.iteritems():
522 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
522 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
523 if dsrc in invalid:
523 if dsrc in invalid:
524 # already seen to be uninteresting
524 # already seen to be uninteresting
525 continue
525 continue
526 elif dsrc in d1 and ddst in d1:
526 elif dsrc in d1 and ddst in d1:
527 # directory wasn't entirely moved locally
527 # directory wasn't entirely moved locally
528 invalid.add(dsrc + "/")
528 invalid.add(dsrc + "/")
529 elif dsrc in d2 and ddst in d2:
529 elif dsrc in d2 and ddst in d2:
530 # directory wasn't entirely moved remotely
530 # directory wasn't entirely moved remotely
531 invalid.add(dsrc + "/")
531 invalid.add(dsrc + "/")
532 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
532 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
533 # files from the same directory moved to two different places
533 # files from the same directory moved to two different places
534 invalid.add(dsrc + "/")
534 invalid.add(dsrc + "/")
535 else:
535 else:
536 # looks good so far
536 # looks good so far
537 dirmove[dsrc + "/"] = ddst + "/"
537 dirmove[dsrc + "/"] = ddst + "/"
538
538
539 for i in invalid:
539 for i in invalid:
540 if i in dirmove:
540 if i in dirmove:
541 del dirmove[i]
541 del dirmove[i]
542 del d1, d2, invalid
542 del d1, d2, invalid
543
543
544 if not dirmove:
544 if not dirmove:
545 return copy, {}, diverge, renamedelete, {}
545 return copy, {}, diverge, renamedelete, {}
546
546
547 for d in dirmove:
547 for d in dirmove:
548 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
548 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
549 (d, dirmove[d]))
549 (d, dirmove[d]))
550
550
551 movewithdir = {}
551 movewithdir = {}
552 # check unaccounted nonoverlapping files against directory moves
552 # check unaccounted nonoverlapping files against directory moves
553 for f in u1r + u2r:
553 for f in u1r + u2r:
554 if f not in fullcopy:
554 if f not in fullcopy:
555 for d in dirmove:
555 for d in dirmove:
556 if f.startswith(d):
556 if f.startswith(d):
557 # new file added in a directory that was moved, move it
557 # new file added in a directory that was moved, move it
558 df = dirmove[d] + f[len(d):]
558 df = dirmove[d] + f[len(d):]
559 if df not in copy:
559 if df not in copy:
560 movewithdir[f] = df
560 movewithdir[f] = df
561 repo.ui.debug((" pending file src: '%s' -> "
561 repo.ui.debug((" pending file src: '%s' -> "
562 "dst: '%s'\n") % (f, df))
562 "dst: '%s'\n") % (f, df))
563 break
563 break
564
564
565 return copy, movewithdir, diverge, renamedelete, dirmove
565 return copy, movewithdir, diverge, renamedelete, dirmove
566
566
def _related(f1, f2, limit):
    """return True if f1 and f2 filectx have a common ancestor

    (on a match the common ancestor filectx itself is returned, which is
    truthy; False is returned otherwise)

    Walk back to common ancestor to see if the two files originate
    from the same file. Since workingfilectx's rev() is None it messes
    up the integer comparison logic, hence the pre-step check for
    None (f1 and f2 can only be workingfilectx's initially).
    """

    if f1 == f2:
        return f1 # a match

    g1, g2 = f1.ancestors(), f2.ancestors()
    try:
        f1r, f2r = f1.linkrev(), f2.linkrev()

        # step off the workingfilectx's (linkrev is None) so that the
        # integer comparisons in the loop below are well-defined
        if f1r is None:
            f1 = next(g1)
        if f2r is None:
            f2 = next(g2)

        # walk both ancestor chains in lockstep, always advancing the
        # side with the larger linkrev, until the chains meet or the
        # walk passes 'limit'
        while True:
            f1r, f2r = f1.linkrev(), f2.linkrev()
            if f1r > f2r:
                f1 = next(g1)
            elif f2r > f1r:
                f2 = next(g2)
            elif f1 == f2:
                return f1 # a match
            elif f1r == f2r or f1r < limit or f2r < limit:
                return False # copy no longer relevant
    except StopIteration:
        # one ancestor chain ran out without a match
        return False
600
600
def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
    """
    check possible copies of f from msrc to mdst

    srcctx = starting context for f in msrc
    dstctx = destination context for f in mdst
    f = the filename to check (as in msrc)
    base = the changectx used as a merge base
    tca = topological common ancestor for graft-like scenarios
    remotebase = True if base is outside tca::srcctx, False otherwise
    limit = the rev number to not search beyond
    data = dictionary of dictionary to store copy data. (see mergecopies)

    note: limit is only an optimization, and there is no guarantee that
    irrelevant revisions will not be limited
    there is no easy way to make this algorithm stop in a guaranteed way
    once it "goes behind a certain revision".
    """

    msrc = srcctx.manifest()
    mdst = dstctx.manifest()
    mb = base.manifest()
    mta = tca.manifest()
    # Might be true if this call is about finding backward renames,
    # This happens in the case of grafts because the DAG is then rotated.
    # If the file exists in both the base and the source, we are not looking
    # for a rename on the source side, but on the part of the DAG that is
    # traversed backwards.
    #
    # In the case there is both backward and forward renames (before and after
    # the base) this is more complicated as we must detect a divergence.
    # We use 'backwards = False' in that case.
    backwards = not remotebase and base != tca and f in mb
    # separate filectx factories: entries of msrc are resolved in srcctx,
    # entries of mdst in dstctx
    getsrcfctx = _makegetfctx(srcctx)
    getdstfctx = _makegetfctx(dstctx)

    if msrc[f] == mb.get(f) and not remotebase:
        # Nothing to merge
        return

    of = None
    # filenames already visited along the rename chain (guards cycles)
    seen = {f}
    for oc in getsrcfctx(f, msrc[f]).ancestors():
        ocr = oc.linkrev()
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if ocr < limit:
                break
            continue
        seen.add(of)

        # remember for dir rename detection
        if backwards:
            data['fullcopy'][of] = f # grafting backwards through renames
        else:
            data['fullcopy'][f] = of
        if of not in mdst:
            continue # no match, keep looking
        if mdst[of] == mb.get(of):
            return # no merge needed, quit early
        c2 = getdstfctx(of, mdst[of])
        # c2 might be a plain new file on added on destination side that is
        # unrelated to the droids we are looking for.
        cr = _related(oc, c2, tca.rev())
        if cr and (of == f or of == c2.path()): # non-divergent
            if backwards:
                data['copy'][of] = f
            elif of in mb:
                data['copy'][f] = of
            elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
                data['copy'][of] = f
                del data['fullcopy'][f]
                data['fullcopy'][of] = f
            else: # divergence w.r.t. graft CA on one side of topological CA
                for sf in seen:
                    if sf in mb:
                        assert sf not in data['diverge']
                        data['diverge'][sf] = [f, of]
                        break
            return

        if of in mta:
            if backwards or remotebase:
                data['incomplete'][of] = f
            else:
                for sf in seen:
                    if sf in mb:
                        if tca == base:
                            data['diverge'].setdefault(sf, []).append(f)
                        else:
                            data['incompletediverge'][sf] = [of, f]
                        return
693
694
def duplicatecopies(repo, rev, fromrev, skiprev=None):
    '''reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    '''
    # copies recorded between fromrev and skiprev are filtered out below
    skipped = {}
    if (skiprev is not None and
        not repo.ui.configbool('experimental', 'disablecopytrace')):
        # disablecopytrace only skips computing the filter set: that pathcopies
        # call is O(size of the repo) during a rebase, while the loop below is
        # much faster (and is required for carrying copy metadata across the
        # rebase anyway).
        skipped = pathcopies(repo[fromrev], repo[skiprev])
    allcopies = pathcopies(repo[fromrev], repo[rev])
    for dst, src in allcopies.iteritems():
        # pathcopies returns backward renames too, so dst is not guaranteed
        # to be tracked in the dirstate; only record it when it is (normal,
        # merged or added) and was not filtered out via skiprev
        if dst not in skipped and repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
General Comments 0
You need to be logged in to leave comments. Login now