##// END OF EJS Templates
py3: explicitly convert dict.keys() and dict.items() into a list...
Pulkit Goyal -
r34350:1a5abc45 default
parent child Browse files
Show More
@@ -1,867 +1,867 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import heapq
11 import heapq
12 import os
12 import os
13
13
14 from . import (
14 from . import (
15 match as matchmod,
15 match as matchmod,
16 node,
16 node,
17 pathutil,
17 pathutil,
18 phases,
18 phases,
19 scmutil,
19 scmutil,
20 util,
20 util,
21 )
21 )
22
22
def _findlimit(repo, a, b):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct
    descendent of the other, in which case we can return the minimum revnum
    of a and b.
    None if no such revision exists.
    """
    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    working = len(cl)  # pseudo rev for the working directory
    if a is None:
        a = working
    if b is None:
        b = working

    side = {a: -1, b: 1}
    # max-heap via negated revision numbers
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = working

    while interesting:
        r = -heapq.heappop(visit)
        if r == working:
            parents = [cl.rev(p) for p in repo.dirstate.parents()]
        else:
            parents = cl.parentrevs(r)
        for p in parents:
            if p < 0:
                continue
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r  # lowest rev visited
            interesting -= 1

    if not hascommonancestor:
        return None

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it
    #    'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @  3 temporary amend commit for a1-amend
    # |
    # o  2 a1-amend
    # |
    # | o  1 a1
    # |/
    # o  0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be
    # 2, yet the filelog has the copy information in rev 1 and we will not
    # look back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendent of b or visa-versa.
    return min(limit, a, b)
107
107
108 def _chain(src, dst, a, b):
108 def _chain(src, dst, a, b):
109 '''chain two sets of copies a->b'''
109 '''chain two sets of copies a->b'''
110 t = a.copy()
110 t = a.copy()
111 for k, v in b.iteritems():
111 for k, v in b.iteritems():
112 if v in t:
112 if v in t:
113 # found a chain
113 # found a chain
114 if t[v] != k:
114 if t[v] != k:
115 # file wasn't renamed back to itself
115 # file wasn't renamed back to itself
116 t[k] = t[v]
116 t[k] = t[v]
117 if v not in dst:
117 if v not in dst:
118 # chain was a rename, not a copy
118 # chain was a rename, not a copy
119 del t[v]
119 del t[v]
120 if v in src:
120 if v in src:
121 # file is a copy of an existing file
121 # file is a copy of an existing file
122 t[k] = v
122 t[k] = v
123
123
124 # remove criss-crossed copies
124 # remove criss-crossed copies
125 for k, v in t.items():
125 for k, v in t.items():
126 if k in src and v in dst:
126 if k in src and v in dst:
127 del t[k]
127 del t[k]
128
128
129 return t
129 return t
130
130
def _tracefile(fctx, am, limit=-1):
    '''return file context that is the ancestor of fctx present in ancestor
    manifest am, stopping after the first ancestor lower than limit'''
    for anc in fctx.ancestors():
        # found in the ancestor manifest with a matching filenode: done
        if am.get(anc.path(), None) == anc.filenode():
            return anc
        # give up once we are past the cutoff revision
        if limit >= 0 and anc.linkrev() < limit and anc.rev() < limit:
            return None
140
140
def _dirstatecopies(d):
    """Return the dirstate copy map of context d, restricted to files whose
    dirstate state is 'a'dded, 'n'ormal or 'm'erged."""
    ds = d._repo.dirstate
    copies = ds.copies().copy()
    # iterate over a snapshot of the keys since we delete while walking
    for dest in list(copies):
        if ds[dest] not in 'anm':
            del copies[dest]
    return copies
148
148
def _computeforwardmissing(a, b, match=None):
    """Computes which files are in b but not a.
    This is its own function so extensions can easily wrap this call to see
    what files _forwardcopies is about to process.
    """
    return b.manifest().filesnotin(a.manifest(), match=match)
157
157
def _forwardcopies(a, b, match=None):
    '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''

    # check for working copy
    w = None
    if b.rev() is None:
        w = b
        b = w.p1()
        if a == b:
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(w)

    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    limit = _findlimit(a._repo, a.rev(), b.rev())
    if limit is None:
        limit = -1
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests,
    # since it compares the entire manifests. We can optimize it in the
    # common use case of computing what copies are in a commit versus its
    # parent (like during a rebase or histedit). Note, we exclude merge
    # commits from this optimization, since the ctx.files() for a merge
    # commit is not correct for this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = scmutil.matchfiles(a._repo, b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
    for f in missing:
        fctx = b[f]
        # share one ancestry context across all traced files to keep
        # linkrev adjustment cheap (see _makegetfctx for the rationale)
        fctx._ancestrycontext = ancestrycontext
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            cm[f] = ofctx.path()

    # combine copies from dirstate if necessary
    if w is not None:
        cm = _chain(a, w, cm, _dirstatecopies(w))

    return cm
207
207
def _backwardrenames(a, b):
    """Return {src@b: dst@a} renames found by inverting _forwardcopies(b, a).

    Copies (where the source still exists in a) are dropped so only true
    renames remain. Returns {} when copytracing is disabled.
    """
    if a._repo.ui.config('experimental', 'copytrace') == 'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename
    # situations can still exist (e.g. hg cp a b; hg mv a c). In those cases
    # we arbitrarily pick one of the renames.
    f = _forwardcopies(b, a)
    r = {}
    # dict.items() instead of py2-only iteritems() so this runs on py3;
    # sorted iteration keeps the arbitrary pick deterministic
    for k, v in sorted(f.items()):
        # remove copies
        if v in a:
            continue
        r[v] = k
    return r
223
223
def pathcopies(x, y, match=None):
    '''find {dst@y: src@x} copy mapping for directed compare'''
    if x == y or not x or not y:
        # identical or degenerate endpoints: nothing can have been copied
        return {}
    a = y.ancestor(x)
    if a == x:
        # x is an ancestor of y: walk forward
        return _forwardcopies(x, y, match=match)
    if a == y:
        # y is an ancestor of x: invert the forward walk
        return _backwardrenames(x, y)
    # otherwise combine both directions through the common ancestor
    return _chain(x, y, _backwardrenames(x, a),
                  _forwardcopies(a, y, match=match))
235
235
236 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
236 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
237 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
237 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
238 and c2. This is its own function so extensions can easily wrap this call
238 and c2. This is its own function so extensions can easily wrap this call
239 to see what files mergecopies is about to process.
239 to see what files mergecopies is about to process.
240
240
241 Even though c1 and c2 are not used in this function, they are useful in
241 Even though c1 and c2 are not used in this function, they are useful in
242 other extensions for being able to read the file nodes of the changed files.
242 other extensions for being able to read the file nodes of the changed files.
243
243
244 "baselabel" can be passed to help distinguish the multiple computations
244 "baselabel" can be passed to help distinguish the multiple computations
245 done in the graft case.
245 done in the graft case.
246 """
246 """
247 u1 = sorted(addedinm1 - addedinm2)
247 u1 = sorted(addedinm1 - addedinm2)
248 u2 = sorted(addedinm2 - addedinm1)
248 u2 = sorted(addedinm2 - addedinm1)
249
249
250 header = " unmatched files in %s"
250 header = " unmatched files in %s"
251 if baselabel:
251 if baselabel:
252 header += ' (from %s)' % baselabel
252 header += ' (from %s)' % baselabel
253 if u1:
253 if u1:
254 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
254 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
255 if u2:
255 if u2:
256 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
256 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
257 return u1, u2
257 return u1, u2
258
258
def _makegetfctx(ctx):
    """return a 'getfctx' function suitable for _checkcopies usage

    We have to re-setup the function building 'filectx' for each
    '_checkcopies' to ensure the linkrev adjustment is properly setup for
    each. Linkrev adjustment is important to avoid bug in rename
    detection. Moreover, having a proper '_ancestrycontext' setup ensures
    the performance impact of this adjustment is kept limited. Without it,
    each file could do a full dag traversal making the time complexity of
    the operation explode (see issue4537).

    This function exists here mostly to limit the impact on stable. Feel
    free to refactor on default.
    """
    rev = ctx.rev()
    repo = ctx._repo
    ac = getattr(ctx, '_ancestrycontext', None)
    if ac is None:
        # build (and cache on ctx) an ancestry context once, shared by all
        # file contexts created below
        revs = [rev]
        if rev is None:
            revs = [p.rev() for p in ctx.parents()]
        ac = repo.changelog.ancestors(revs, inclusive=True)
        ctx._ancestrycontext = ac
    def makectx(f, n):
        if n in node.wdirnodes:  # in a working context?
            if ctx.rev() is None:
                return ctx.filectx(f)
            return repo[None][f]
        fctx = repo.filectx(f, fileid=n)
        # setup only needed for a filectx not created from a changectx
        fctx._ancestrycontext = ac
        fctx._descendantrev = rev
        return fctx
    return util.lrucachefunc(makectx)
293
293
294 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
294 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
295 """combine partial copy paths"""
295 """combine partial copy paths"""
296 remainder = {}
296 remainder = {}
297 for f in copyfrom:
297 for f in copyfrom:
298 if f in copyto:
298 if f in copyto:
299 finalcopy[copyto[f]] = copyfrom[f]
299 finalcopy[copyto[f]] = copyfrom[f]
300 del copyto[f]
300 del copyto[f]
301 for f in incompletediverge:
301 for f in incompletediverge:
302 assert f not in diverge
302 assert f not in diverge
303 ic = incompletediverge[f]
303 ic = incompletediverge[f]
304 if ic[0] in copyto:
304 if ic[0] in copyto:
305 diverge[f] = [copyto[ic[0]], ic[1]]
305 diverge[f] = [copyto[ic[0]], ic[1]]
306 else:
306 else:
307 remainder[f] = ic
307 remainder[f] = ic
308 return remainder
308 return remainder
309
309
def mergecopies(repo, c1, c2, base):
    """
    Find moves and copies between contexts c1 and c2 that are relevant for
    merging, dispatching to the copytracing algorithm selected by the
    'experimental.copytrace' config. 'base' will be used as the merge base.

    Copytracing is used in commands like rebase, merge, unshelve, etc to
    merge files that were moved/copied in one merge parent and modified in
    another. For example:

    o          ---> 4 another commit
    |
    |   o      ---> 3 commit that modifies a.txt
    |  /
    o /        ---> 2 commit that moves a.txt to b.txt
    |/
    o          ---> 1 merge base

    If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if user have copytrace disabled, we prints the following
    message:

    ```other changed <file> which local deleted```

    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
    "dirmove".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source present in one context but not the other
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir
    renames. This is needed for handling changes to new files previously
    grafted into renamed directories.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}, {}

    # avoid silly behavior for parent -> working dir: the dirstate already
    # knows these copies directly
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return repo.dirstate.copies(), {}, {}, {}, {}

    copytracing = repo.ui.config('experimental', 'copytrace')

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept
    # across a rebase.
    if copytracing == 'off':
        return {}, {}, {}, {}, {}
    if copytracing == 'heuristics':
        # Do full copytracing if only drafts are involved as that will be
        # fast enough and will also cover the copies which can be missed by
        # heuristics
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    return _fullcopytracing(repo, c1, c2, base)
379
379
def _isfullcopytraceable(repo, c1, base):
    """ Checks that if base, source and destination are all draft branches,
    if yes let's use the full copytrace algorithm for increased capabilities
    since it will be fast enough.
    """
    # use the first parent when looking at an uncommitted working context
    if c1.rev() is None:
        c1 = c1.p1()

    # set literal instead of set([...]): same contents, clearer idiom
    nonpublicphases = {phases.draft, phases.secret}

    if (c1.phase() in nonpublicphases) and (base.phase() in nonpublicphases):
        sourcecommitlimit = repo.ui.configint('experimental',
                                              'copytrace.sourcecommitlimit')
        # count the changesets between base and c1; full copytracing is
        # affordable only below the configured limit
        commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
        return commits < sourcecommitlimit
    return False
396
396
397 def _fullcopytracing(repo, c1, c2, base):
397 def _fullcopytracing(repo, c1, c2, base):
398 """ The full copytracing algorithm which finds all the new files that were
398 """ The full copytracing algorithm which finds all the new files that were
399 added from merge base up to the top commit and for each file it checks if
399 added from merge base up to the top commit and for each file it checks if
400 this file was copied from another file.
400 this file was copied from another file.
401
401
402 This is pretty slow when a lot of changesets are involved but will track all
402 This is pretty slow when a lot of changesets are involved but will track all
403 the copies.
403 the copies.
404 """
404 """
405 # In certain scenarios (e.g. graft, update or rebase), base can be
405 # In certain scenarios (e.g. graft, update or rebase), base can be
406 # overridden We still need to know a real common ancestor in this case We
406 # overridden We still need to know a real common ancestor in this case We
407 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
407 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
408 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
408 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
409 # caller may not know if the revision passed in lieu of the CA is a genuine
409 # caller may not know if the revision passed in lieu of the CA is a genuine
410 # common ancestor or not without explicitly checking it, it's better to
410 # common ancestor or not without explicitly checking it, it's better to
411 # determine that here.
411 # determine that here.
412 #
412 #
413 # base.descendant(wc) and base.descendant(base) are False, work around that
413 # base.descendant(wc) and base.descendant(base) are False, work around that
414 _c1 = c1.p1() if c1.rev() is None else c1
414 _c1 = c1.p1() if c1.rev() is None else c1
415 _c2 = c2.p1() if c2.rev() is None else c2
415 _c2 = c2.p1() if c2.rev() is None else c2
416 # an endpoint is "dirty" if it isn't a descendant of the merge base
416 # an endpoint is "dirty" if it isn't a descendant of the merge base
417 # if we have a dirty endpoint, we need to trigger graft logic, and also
417 # if we have a dirty endpoint, we need to trigger graft logic, and also
418 # keep track of which endpoint is dirty
418 # keep track of which endpoint is dirty
419 dirtyc1 = not (base == _c1 or base.descendant(_c1))
419 dirtyc1 = not (base == _c1 or base.descendant(_c1))
420 dirtyc2 = not (base == _c2 or base.descendant(_c2))
420 dirtyc2 = not (base == _c2 or base.descendant(_c2))
421 graft = dirtyc1 or dirtyc2
421 graft = dirtyc1 or dirtyc2
422 tca = base
422 tca = base
423 if graft:
423 if graft:
424 tca = _c1.ancestor(_c2)
424 tca = _c1.ancestor(_c2)
425
425
426 limit = _findlimit(repo, c1.rev(), c2.rev())
426 limit = _findlimit(repo, c1.rev(), c2.rev())
427 if limit is None:
427 if limit is None:
428 # no common ancestor, no copies
428 # no common ancestor, no copies
429 return {}, {}, {}, {}, {}
429 return {}, {}, {}, {}, {}
430 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
430 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
431
431
432 m1 = c1.manifest()
432 m1 = c1.manifest()
433 m2 = c2.manifest()
433 m2 = c2.manifest()
434 mb = base.manifest()
434 mb = base.manifest()
435
435
436 # gather data from _checkcopies:
436 # gather data from _checkcopies:
437 # - diverge = record all diverges in this dict
437 # - diverge = record all diverges in this dict
438 # - copy = record all non-divergent copies in this dict
438 # - copy = record all non-divergent copies in this dict
439 # - fullcopy = record all copies in this dict
439 # - fullcopy = record all copies in this dict
440 # - incomplete = record non-divergent partial copies here
440 # - incomplete = record non-divergent partial copies here
441 # - incompletediverge = record divergent partial copies here
441 # - incompletediverge = record divergent partial copies here
442 diverge = {} # divergence data is shared
442 diverge = {} # divergence data is shared
443 incompletediverge = {}
443 incompletediverge = {}
444 data1 = {'copy': {},
444 data1 = {'copy': {},
445 'fullcopy': {},
445 'fullcopy': {},
446 'incomplete': {},
446 'incomplete': {},
447 'diverge': diverge,
447 'diverge': diverge,
448 'incompletediverge': incompletediverge,
448 'incompletediverge': incompletediverge,
449 }
449 }
450 data2 = {'copy': {},
450 data2 = {'copy': {},
451 'fullcopy': {},
451 'fullcopy': {},
452 'incomplete': {},
452 'incomplete': {},
453 'diverge': diverge,
453 'diverge': diverge,
454 'incompletediverge': incompletediverge,
454 'incompletediverge': incompletediverge,
455 }
455 }
456
456
457 # find interesting file sets from manifests
457 # find interesting file sets from manifests
458 addedinm1 = m1.filesnotin(mb)
458 addedinm1 = m1.filesnotin(mb)
459 addedinm2 = m2.filesnotin(mb)
459 addedinm2 = m2.filesnotin(mb)
460 bothnew = sorted(addedinm1 & addedinm2)
460 bothnew = sorted(addedinm1 & addedinm2)
461 if tca == base:
461 if tca == base:
462 # unmatched file from base
462 # unmatched file from base
463 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
463 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
464 u1u, u2u = u1r, u2r
464 u1u, u2u = u1r, u2r
465 else:
465 else:
466 # unmatched file from base (DAG rotation in the graft case)
466 # unmatched file from base (DAG rotation in the graft case)
467 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
467 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
468 baselabel='base')
468 baselabel='base')
469 # unmatched file from topological common ancestors (no DAG rotation)
469 # unmatched file from topological common ancestors (no DAG rotation)
470 # need to recompute this for directory move handling when grafting
470 # need to recompute this for directory move handling when grafting
471 mta = tca.manifest()
471 mta = tca.manifest()
472 u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
472 u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
473 m2.filesnotin(mta),
473 m2.filesnotin(mta),
474 baselabel='topological common ancestor')
474 baselabel='topological common ancestor')
475
475
476 for f in u1u:
476 for f in u1u:
477 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
477 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
478
478
479 for f in u2u:
479 for f in u2u:
480 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
480 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
481
481
482 copy = dict(data1['copy'])
482 copy = dict(data1['copy'])
483 copy.update(data2['copy'])
483 copy.update(data2['copy'])
484 fullcopy = dict(data1['fullcopy'])
484 fullcopy = dict(data1['fullcopy'])
485 fullcopy.update(data2['fullcopy'])
485 fullcopy.update(data2['fullcopy'])
486
486
487 if dirtyc1:
487 if dirtyc1:
488 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
488 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
489 incompletediverge)
489 incompletediverge)
490 else:
490 else:
491 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
491 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
492 incompletediverge)
492 incompletediverge)
493
493
494 renamedelete = {}
494 renamedelete = {}
495 renamedeleteset = set()
495 renamedeleteset = set()
496 divergeset = set()
496 divergeset = set()
497 for of, fl in diverge.items():
497 for of, fl in list(diverge.items()):
498 if len(fl) == 1 or of in c1 or of in c2:
498 if len(fl) == 1 or of in c1 or of in c2:
499 del diverge[of] # not actually divergent, or not a rename
499 del diverge[of] # not actually divergent, or not a rename
500 if of not in c1 and of not in c2:
500 if of not in c1 and of not in c2:
501 # renamed on one side, deleted on the other side, but filter
501 # renamed on one side, deleted on the other side, but filter
502 # out files that have been renamed and then deleted
502 # out files that have been renamed and then deleted
503 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
503 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
504 renamedeleteset.update(fl) # reverse map for below
504 renamedeleteset.update(fl) # reverse map for below
505 else:
505 else:
506 divergeset.update(fl) # reverse map for below
506 divergeset.update(fl) # reverse map for below
507
507
508 if bothnew:
508 if bothnew:
509 repo.ui.debug(" unmatched files new in both:\n %s\n"
509 repo.ui.debug(" unmatched files new in both:\n %s\n"
510 % "\n ".join(bothnew))
510 % "\n ".join(bothnew))
511 bothdiverge = {}
511 bothdiverge = {}
512 bothincompletediverge = {}
512 bothincompletediverge = {}
513 remainder = {}
513 remainder = {}
514 both1 = {'copy': {},
514 both1 = {'copy': {},
515 'fullcopy': {},
515 'fullcopy': {},
516 'incomplete': {},
516 'incomplete': {},
517 'diverge': bothdiverge,
517 'diverge': bothdiverge,
518 'incompletediverge': bothincompletediverge
518 'incompletediverge': bothincompletediverge
519 }
519 }
520 both2 = {'copy': {},
520 both2 = {'copy': {},
521 'fullcopy': {},
521 'fullcopy': {},
522 'incomplete': {},
522 'incomplete': {},
523 'diverge': bothdiverge,
523 'diverge': bothdiverge,
524 'incompletediverge': bothincompletediverge
524 'incompletediverge': bothincompletediverge
525 }
525 }
526 for f in bothnew:
526 for f in bothnew:
527 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
527 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
528 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
528 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
529 if dirtyc1:
529 if dirtyc1:
530 # incomplete copies may only be found on the "dirty" side for bothnew
530 # incomplete copies may only be found on the "dirty" side for bothnew
531 assert not both2['incomplete']
531 assert not both2['incomplete']
532 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
532 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
533 bothincompletediverge)
533 bothincompletediverge)
534 elif dirtyc2:
534 elif dirtyc2:
535 assert not both1['incomplete']
535 assert not both1['incomplete']
536 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
536 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
537 bothincompletediverge)
537 bothincompletediverge)
538 else:
538 else:
539 # incomplete copies and divergences can't happen outside grafts
539 # incomplete copies and divergences can't happen outside grafts
540 assert not both1['incomplete']
540 assert not both1['incomplete']
541 assert not both2['incomplete']
541 assert not both2['incomplete']
542 assert not bothincompletediverge
542 assert not bothincompletediverge
543 for f in remainder:
543 for f in remainder:
544 assert f not in bothdiverge
544 assert f not in bothdiverge
545 ic = remainder[f]
545 ic = remainder[f]
546 if ic[0] in (m1 if dirtyc1 else m2):
546 if ic[0] in (m1 if dirtyc1 else m2):
547 # backed-out rename on one side, but watch out for deleted files
547 # backed-out rename on one side, but watch out for deleted files
548 bothdiverge[f] = ic
548 bothdiverge[f] = ic
549 for of, fl in bothdiverge.items():
549 for of, fl in bothdiverge.items():
550 if len(fl) == 2 and fl[0] == fl[1]:
550 if len(fl) == 2 and fl[0] == fl[1]:
551 copy[fl[0]] = of # not actually divergent, just matching renames
551 copy[fl[0]] = of # not actually divergent, just matching renames
552
552
553 if fullcopy and repo.ui.debugflag:
553 if fullcopy and repo.ui.debugflag:
554 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
554 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
555 "% = renamed and deleted):\n")
555 "% = renamed and deleted):\n")
556 for f in sorted(fullcopy):
556 for f in sorted(fullcopy):
557 note = ""
557 note = ""
558 if f in copy:
558 if f in copy:
559 note += "*"
559 note += "*"
560 if f in divergeset:
560 if f in divergeset:
561 note += "!"
561 note += "!"
562 if f in renamedeleteset:
562 if f in renamedeleteset:
563 note += "%"
563 note += "%"
564 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
564 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
565 note))
565 note))
566 del divergeset
566 del divergeset
567
567
568 if not fullcopy:
568 if not fullcopy:
569 return copy, {}, diverge, renamedelete, {}
569 return copy, {}, diverge, renamedelete, {}
570
570
571 repo.ui.debug(" checking for directory renames\n")
571 repo.ui.debug(" checking for directory renames\n")
572
572
573 # generate a directory move map
573 # generate a directory move map
574 d1, d2 = c1.dirs(), c2.dirs()
574 d1, d2 = c1.dirs(), c2.dirs()
575 # Hack for adding '', which is not otherwise added, to d1 and d2
575 # Hack for adding '', which is not otherwise added, to d1 and d2
576 d1.addpath('/')
576 d1.addpath('/')
577 d2.addpath('/')
577 d2.addpath('/')
578 invalid = set()
578 invalid = set()
579 dirmove = {}
579 dirmove = {}
580
580
581 # examine each file copy for a potential directory move, which is
581 # examine each file copy for a potential directory move, which is
582 # when all the files in a directory are moved to a new directory
582 # when all the files in a directory are moved to a new directory
583 for dst, src in fullcopy.iteritems():
583 for dst, src in fullcopy.iteritems():
584 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
584 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
585 if dsrc in invalid:
585 if dsrc in invalid:
586 # already seen to be uninteresting
586 # already seen to be uninteresting
587 continue
587 continue
588 elif dsrc in d1 and ddst in d1:
588 elif dsrc in d1 and ddst in d1:
589 # directory wasn't entirely moved locally
589 # directory wasn't entirely moved locally
590 invalid.add(dsrc + "/")
590 invalid.add(dsrc + "/")
591 elif dsrc in d2 and ddst in d2:
591 elif dsrc in d2 and ddst in d2:
592 # directory wasn't entirely moved remotely
592 # directory wasn't entirely moved remotely
593 invalid.add(dsrc + "/")
593 invalid.add(dsrc + "/")
594 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
594 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
595 # files from the same directory moved to two different places
595 # files from the same directory moved to two different places
596 invalid.add(dsrc + "/")
596 invalid.add(dsrc + "/")
597 else:
597 else:
598 # looks good so far
598 # looks good so far
599 dirmove[dsrc + "/"] = ddst + "/"
599 dirmove[dsrc + "/"] = ddst + "/"
600
600
601 for i in invalid:
601 for i in invalid:
602 if i in dirmove:
602 if i in dirmove:
603 del dirmove[i]
603 del dirmove[i]
604 del d1, d2, invalid
604 del d1, d2, invalid
605
605
606 if not dirmove:
606 if not dirmove:
607 return copy, {}, diverge, renamedelete, {}
607 return copy, {}, diverge, renamedelete, {}
608
608
609 for d in dirmove:
609 for d in dirmove:
610 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
610 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
611 (d, dirmove[d]))
611 (d, dirmove[d]))
612
612
613 movewithdir = {}
613 movewithdir = {}
614 # check unaccounted nonoverlapping files against directory moves
614 # check unaccounted nonoverlapping files against directory moves
615 for f in u1r + u2r:
615 for f in u1r + u2r:
616 if f not in fullcopy:
616 if f not in fullcopy:
617 for d in dirmove:
617 for d in dirmove:
618 if f.startswith(d):
618 if f.startswith(d):
619 # new file added in a directory that was moved, move it
619 # new file added in a directory that was moved, move it
620 df = dirmove[d] + f[len(d):]
620 df = dirmove[d] + f[len(d):]
621 if df not in copy:
621 if df not in copy:
622 movewithdir[f] = df
622 movewithdir[f] = df
623 repo.ui.debug((" pending file src: '%s' -> "
623 repo.ui.debug((" pending file src: '%s' -> "
624 "dst: '%s'\n") % (f, df))
624 "dst: '%s'\n") % (f, df))
625 break
625 break
626
626
627 return copy, movewithdir, diverge, renamedelete, dirmove
627 return copy, movewithdir, diverge, renamedelete, dirmove
628
628
629 def _heuristicscopytracing(repo, c1, c2, base):
629 def _heuristicscopytracing(repo, c1, c2, base):
630 """ Fast copytracing using filename heuristics
630 """ Fast copytracing using filename heuristics
631
631
632 Assumes that moves or renames are of following two types:
632 Assumes that moves or renames are of following two types:
633
633
634 1) Inside a directory only (same directory name but different filenames)
634 1) Inside a directory only (same directory name but different filenames)
635 2) Move from one directory to another
635 2) Move from one directory to another
636 (same filenames but different directory names)
636 (same filenames but different directory names)
637
637
638 Works only when there are no merge commits in the "source branch".
638 Works only when there are no merge commits in the "source branch".
639 Source branch is commits from base up to c2 not including base.
639 Source branch is commits from base up to c2 not including base.
640
640
641 If merge is involved it fallbacks to _fullcopytracing().
641 If merge is involved it fallbacks to _fullcopytracing().
642
642
643 Can be used by setting the following config:
643 Can be used by setting the following config:
644
644
645 [experimental]
645 [experimental]
646 copytrace = heuristics
646 copytrace = heuristics
647 """
647 """
648
648
649 if c1.rev() is None:
649 if c1.rev() is None:
650 c1 = c1.p1()
650 c1 = c1.p1()
651 if c2.rev() is None:
651 if c2.rev() is None:
652 c2 = c2.p1()
652 c2 = c2.p1()
653
653
654 copies = {}
654 copies = {}
655
655
656 changedfiles = set()
656 changedfiles = set()
657 m1 = c1.manifest()
657 m1 = c1.manifest()
658 if not repo.revs('%d::%d', base.rev(), c2.rev()):
658 if not repo.revs('%d::%d', base.rev(), c2.rev()):
659 # If base is not in c2 branch, we switch to fullcopytracing
659 # If base is not in c2 branch, we switch to fullcopytracing
660 repo.ui.debug("switching to full copytracing as base is not "
660 repo.ui.debug("switching to full copytracing as base is not "
661 "an ancestor of c2\n")
661 "an ancestor of c2\n")
662 return _fullcopytracing(repo, c1, c2, base)
662 return _fullcopytracing(repo, c1, c2, base)
663
663
664 ctx = c2
664 ctx = c2
665 while ctx != base:
665 while ctx != base:
666 if len(ctx.parents()) == 2:
666 if len(ctx.parents()) == 2:
667 # To keep things simple let's not handle merges
667 # To keep things simple let's not handle merges
668 repo.ui.debug("switching to full copytracing because of merges\n")
668 repo.ui.debug("switching to full copytracing because of merges\n")
669 return _fullcopytracing(repo, c1, c2, base)
669 return _fullcopytracing(repo, c1, c2, base)
670 changedfiles.update(ctx.files())
670 changedfiles.update(ctx.files())
671 ctx = ctx.p1()
671 ctx = ctx.p1()
672
672
673 cp = _forwardcopies(base, c2)
673 cp = _forwardcopies(base, c2)
674 for dst, src in cp.iteritems():
674 for dst, src in cp.iteritems():
675 if src in m1:
675 if src in m1:
676 copies[dst] = src
676 copies[dst] = src
677
677
678 # file is missing if it isn't present in the destination, but is present in
678 # file is missing if it isn't present in the destination, but is present in
679 # the base and present in the source.
679 # the base and present in the source.
680 # Presence in the base is important to exclude added files, presence in the
680 # Presence in the base is important to exclude added files, presence in the
681 # source is important to exclude removed files.
681 # source is important to exclude removed files.
682 missingfiles = filter(lambda f: f not in m1 and f in base and f in c2,
682 missingfiles = filter(lambda f: f not in m1 and f in base and f in c2,
683 changedfiles)
683 changedfiles)
684
684
685 if missingfiles:
685 if missingfiles:
686 basenametofilename = collections.defaultdict(list)
686 basenametofilename = collections.defaultdict(list)
687 dirnametofilename = collections.defaultdict(list)
687 dirnametofilename = collections.defaultdict(list)
688
688
689 for f in m1.filesnotin(base.manifest()):
689 for f in m1.filesnotin(base.manifest()):
690 basename = os.path.basename(f)
690 basename = os.path.basename(f)
691 dirname = os.path.dirname(f)
691 dirname = os.path.dirname(f)
692 basenametofilename[basename].append(f)
692 basenametofilename[basename].append(f)
693 dirnametofilename[dirname].append(f)
693 dirnametofilename[dirname].append(f)
694
694
695 # in case of a rebase/graft, base may not be a common ancestor
695 # in case of a rebase/graft, base may not be a common ancestor
696 anc = c1.ancestor(c2)
696 anc = c1.ancestor(c2)
697
697
698 for f in missingfiles:
698 for f in missingfiles:
699 basename = os.path.basename(f)
699 basename = os.path.basename(f)
700 dirname = os.path.dirname(f)
700 dirname = os.path.dirname(f)
701 samebasename = basenametofilename[basename]
701 samebasename = basenametofilename[basename]
702 samedirname = dirnametofilename[dirname]
702 samedirname = dirnametofilename[dirname]
703 movecandidates = samebasename + samedirname
703 movecandidates = samebasename + samedirname
704 # f is guaranteed to be present in c2, that's why
704 # f is guaranteed to be present in c2, that's why
705 # c2.filectx(f) won't fail
705 # c2.filectx(f) won't fail
706 f2 = c2.filectx(f)
706 f2 = c2.filectx(f)
707 for candidate in movecandidates:
707 for candidate in movecandidates:
708 f1 = c1.filectx(candidate)
708 f1 = c1.filectx(candidate)
709 if _related(f1, f2, anc.rev()):
709 if _related(f1, f2, anc.rev()):
710 # if there are a few related copies then we'll merge
710 # if there are a few related copies then we'll merge
711 # changes into all of them. This matches the behaviour
711 # changes into all of them. This matches the behaviour
712 # of upstream copytracing
712 # of upstream copytracing
713 copies[candidate] = f
713 copies[candidate] = f
714
714
715 return copies, {}, {}, {}, {}
715 return copies, {}, {}, {}, {}
716
716
717 def _related(f1, f2, limit):
717 def _related(f1, f2, limit):
718 """return True if f1 and f2 filectx have a common ancestor
718 """return True if f1 and f2 filectx have a common ancestor
719
719
720 Walk back to common ancestor to see if the two files originate
720 Walk back to common ancestor to see if the two files originate
721 from the same file. Since workingfilectx's rev() is None it messes
721 from the same file. Since workingfilectx's rev() is None it messes
722 up the integer comparison logic, hence the pre-step check for
722 up the integer comparison logic, hence the pre-step check for
723 None (f1 and f2 can only be workingfilectx's initially).
723 None (f1 and f2 can only be workingfilectx's initially).
724 """
724 """
725
725
726 if f1 == f2:
726 if f1 == f2:
727 return f1 # a match
727 return f1 # a match
728
728
729 g1, g2 = f1.ancestors(), f2.ancestors()
729 g1, g2 = f1.ancestors(), f2.ancestors()
730 try:
730 try:
731 f1r, f2r = f1.linkrev(), f2.linkrev()
731 f1r, f2r = f1.linkrev(), f2.linkrev()
732
732
733 if f1r is None:
733 if f1r is None:
734 f1 = next(g1)
734 f1 = next(g1)
735 if f2r is None:
735 if f2r is None:
736 f2 = next(g2)
736 f2 = next(g2)
737
737
738 while True:
738 while True:
739 f1r, f2r = f1.linkrev(), f2.linkrev()
739 f1r, f2r = f1.linkrev(), f2.linkrev()
740 if f1r > f2r:
740 if f1r > f2r:
741 f1 = next(g1)
741 f1 = next(g1)
742 elif f2r > f1r:
742 elif f2r > f1r:
743 f2 = next(g2)
743 f2 = next(g2)
744 elif f1 == f2:
744 elif f1 == f2:
745 return f1 # a match
745 return f1 # a match
746 elif f1r == f2r or f1r < limit or f2r < limit:
746 elif f1r == f2r or f1r < limit or f2r < limit:
747 return False # copy no longer relevant
747 return False # copy no longer relevant
748 except StopIteration:
748 except StopIteration:
749 return False
749 return False
750
750
751 def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
751 def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
752 """
752 """
753 check possible copies of f from msrc to mdst
753 check possible copies of f from msrc to mdst
754
754
755 srcctx = starting context for f in msrc
755 srcctx = starting context for f in msrc
756 dstctx = destination context for f in mdst
756 dstctx = destination context for f in mdst
757 f = the filename to check (as in msrc)
757 f = the filename to check (as in msrc)
758 base = the changectx used as a merge base
758 base = the changectx used as a merge base
759 tca = topological common ancestor for graft-like scenarios
759 tca = topological common ancestor for graft-like scenarios
760 remotebase = True if base is outside tca::srcctx, False otherwise
760 remotebase = True if base is outside tca::srcctx, False otherwise
761 limit = the rev number to not search beyond
761 limit = the rev number to not search beyond
762 data = dictionary of dictionary to store copy data. (see mergecopies)
762 data = dictionary of dictionary to store copy data. (see mergecopies)
763
763
764 note: limit is only an optimization, and provides no guarantee that
764 note: limit is only an optimization, and provides no guarantee that
765 irrelevant revisions will not be visited
765 irrelevant revisions will not be visited
766 there is no easy way to make this algorithm stop in a guaranteed way
766 there is no easy way to make this algorithm stop in a guaranteed way
767 once it "goes behind a certain revision".
767 once it "goes behind a certain revision".
768 """
768 """
769
769
770 msrc = srcctx.manifest()
770 msrc = srcctx.manifest()
771 mdst = dstctx.manifest()
771 mdst = dstctx.manifest()
772 mb = base.manifest()
772 mb = base.manifest()
773 mta = tca.manifest()
773 mta = tca.manifest()
774 # Might be true if this call is about finding backward renames,
774 # Might be true if this call is about finding backward renames,
775 # This happens in the case of grafts because the DAG is then rotated.
775 # This happens in the case of grafts because the DAG is then rotated.
776 # If the file exists in both the base and the source, we are not looking
776 # If the file exists in both the base and the source, we are not looking
777 # for a rename on the source side, but on the part of the DAG that is
777 # for a rename on the source side, but on the part of the DAG that is
778 # traversed backwards.
778 # traversed backwards.
779 #
779 #
780 # In the case there is both backward and forward renames (before and after
780 # In the case there is both backward and forward renames (before and after
781 # the base) this is more complicated as we must detect a divergence.
781 # the base) this is more complicated as we must detect a divergence.
782 # We use 'backwards = False' in that case.
782 # We use 'backwards = False' in that case.
783 backwards = not remotebase and base != tca and f in mb
783 backwards = not remotebase and base != tca and f in mb
784 getsrcfctx = _makegetfctx(srcctx)
784 getsrcfctx = _makegetfctx(srcctx)
785 getdstfctx = _makegetfctx(dstctx)
785 getdstfctx = _makegetfctx(dstctx)
786
786
787 if msrc[f] == mb.get(f) and not remotebase:
787 if msrc[f] == mb.get(f) and not remotebase:
788 # Nothing to merge
788 # Nothing to merge
789 return
789 return
790
790
791 of = None
791 of = None
792 seen = {f}
792 seen = {f}
793 for oc in getsrcfctx(f, msrc[f]).ancestors():
793 for oc in getsrcfctx(f, msrc[f]).ancestors():
794 ocr = oc.linkrev()
794 ocr = oc.linkrev()
795 of = oc.path()
795 of = oc.path()
796 if of in seen:
796 if of in seen:
797 # check limit late - grab last rename before
797 # check limit late - grab last rename before
798 if ocr < limit:
798 if ocr < limit:
799 break
799 break
800 continue
800 continue
801 seen.add(of)
801 seen.add(of)
802
802
803 # remember for dir rename detection
803 # remember for dir rename detection
804 if backwards:
804 if backwards:
805 data['fullcopy'][of] = f # grafting backwards through renames
805 data['fullcopy'][of] = f # grafting backwards through renames
806 else:
806 else:
807 data['fullcopy'][f] = of
807 data['fullcopy'][f] = of
808 if of not in mdst:
808 if of not in mdst:
809 continue # no match, keep looking
809 continue # no match, keep looking
810 if mdst[of] == mb.get(of):
810 if mdst[of] == mb.get(of):
811 return # no merge needed, quit early
811 return # no merge needed, quit early
812 c2 = getdstfctx(of, mdst[of])
812 c2 = getdstfctx(of, mdst[of])
813 # c2 might be a plain new file on added on destination side that is
813 # c2 might be a plain new file on added on destination side that is
814 # unrelated to the droids we are looking for.
814 # unrelated to the droids we are looking for.
815 cr = _related(oc, c2, tca.rev())
815 cr = _related(oc, c2, tca.rev())
816 if cr and (of == f or of == c2.path()): # non-divergent
816 if cr and (of == f or of == c2.path()): # non-divergent
817 if backwards:
817 if backwards:
818 data['copy'][of] = f
818 data['copy'][of] = f
819 elif of in mb:
819 elif of in mb:
820 data['copy'][f] = of
820 data['copy'][f] = of
821 elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
821 elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
822 data['copy'][of] = f
822 data['copy'][of] = f
823 del data['fullcopy'][f]
823 del data['fullcopy'][f]
824 data['fullcopy'][of] = f
824 data['fullcopy'][of] = f
825 else: # divergence w.r.t. graft CA on one side of topological CA
825 else: # divergence w.r.t. graft CA on one side of topological CA
826 for sf in seen:
826 for sf in seen:
827 if sf in mb:
827 if sf in mb:
828 assert sf not in data['diverge']
828 assert sf not in data['diverge']
829 data['diverge'][sf] = [f, of]
829 data['diverge'][sf] = [f, of]
830 break
830 break
831 return
831 return
832
832
833 if of in mta:
833 if of in mta:
834 if backwards or remotebase:
834 if backwards or remotebase:
835 data['incomplete'][of] = f
835 data['incomplete'][of] = f
836 else:
836 else:
837 for sf in seen:
837 for sf in seen:
838 if sf in mb:
838 if sf in mb:
839 if tca == base:
839 if tca == base:
840 data['diverge'].setdefault(sf, []).append(f)
840 data['diverge'].setdefault(sf, []).append(f)
841 else:
841 else:
842 data['incompletediverge'][sf] = [of, f]
842 data['incompletediverge'][sf] = [of, f]
843 return
843 return
844
844
845 def duplicatecopies(repo, rev, fromrev, skiprev=None):
845 def duplicatecopies(repo, rev, fromrev, skiprev=None):
846 '''reproduce copies from fromrev to rev in the dirstate
846 '''reproduce copies from fromrev to rev in the dirstate
847
847
848 If skiprev is specified, it's a revision that should be used to
848 If skiprev is specified, it's a revision that should be used to
849 filter copy records. Any copies that occur between fromrev and
849 filter copy records. Any copies that occur between fromrev and
850 skiprev will not be duplicated, even if they appear in the set of
850 skiprev will not be duplicated, even if they appear in the set of
851 copies between fromrev and rev.
851 copies between fromrev and rev.
852 '''
852 '''
853 exclude = {}
853 exclude = {}
854 if (skiprev is not None and
854 if (skiprev is not None and
855 repo.ui.config('experimental', 'copytrace') != 'off'):
855 repo.ui.config('experimental', 'copytrace') != 'off'):
856 # copytrace='off' skips this line, but not the entire function because
856 # copytrace='off' skips this line, but not the entire function because
857 # the line below is O(size of the repo) during a rebase, while the rest
857 # the line below is O(size of the repo) during a rebase, while the rest
858 # of the function is much faster (and is required for carrying copy
858 # of the function is much faster (and is required for carrying copy
859 # metadata across the rebase anyway).
859 # metadata across the rebase anyway).
860 exclude = pathcopies(repo[fromrev], repo[skiprev])
860 exclude = pathcopies(repo[fromrev], repo[skiprev])
861 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
861 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
862 # copies.pathcopies returns backward renames, so dst might not
862 # copies.pathcopies returns backward renames, so dst might not
863 # actually be in the dirstate
863 # actually be in the dirstate
864 if dst in exclude:
864 if dst in exclude:
865 continue
865 continue
866 if repo.dirstate[dst] in "nma":
866 if repo.dirstate[dst] in "nma":
867 repo.dirstate.copy(src, dst)
867 repo.dirstate.copy(src, dst)
@@ -1,1774 +1,1774 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from . import (
25 from . import (
26 copies,
26 copies,
27 error,
27 error,
28 filemerge,
28 filemerge,
29 match as matchmod,
29 match as matchmod,
30 obsutil,
30 obsutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepo,
33 subrepo,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41 def _droponode(data):
41 def _droponode(data):
42 # used for compatibility for v1
42 # used for compatibility for v1
43 bits = data.split('\0')
43 bits = data.split('\0')
44 bits = bits[:-2] + bits[-1:]
44 bits = bits[:-2] + bits[-1:]
45 return '\0'.join(bits)
45 return '\0'.join(bits)
46
46
47 class mergestate(object):
47 class mergestate(object):
48 '''track 3-way merge state of individual files
48 '''track 3-way merge state of individual files
49
49
50 The merge state is stored on disk when needed. Two files are used: one with
50 The merge state is stored on disk when needed. Two files are used: one with
51 an old format (version 1), and one with a new format (version 2). Version 2
51 an old format (version 1), and one with a new format (version 2). Version 2
52 stores a superset of the data in version 1, including new kinds of records
52 stores a superset of the data in version 1, including new kinds of records
53 in the future. For more about the new format, see the documentation for
53 in the future. For more about the new format, see the documentation for
54 `_readrecordsv2`.
54 `_readrecordsv2`.
55
55
56 Each record can contain arbitrary content, and has an associated type. This
56 Each record can contain arbitrary content, and has an associated type. This
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 versions of Mercurial that don't support it should abort. If `type` is
58 versions of Mercurial that don't support it should abort. If `type` is
59 lowercase, the record can be safely ignored.
59 lowercase, the record can be safely ignored.
60
60
61 Currently known records:
61 Currently known records:
62
62
63 L: the node of the "local" part of the merge (hexified version)
63 L: the node of the "local" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
65 F: a file to be merged entry
65 F: a file to be merged entry
66 C: a change/delete or delete/change conflict
66 C: a change/delete or delete/change conflict
67 D: a file that the external merge driver will merge internally
67 D: a file that the external merge driver will merge internally
68 (experimental)
68 (experimental)
69 m: the external merge driver defined for this merge plus its run state
69 m: the external merge driver defined for this merge plus its run state
70 (experimental)
70 (experimental)
71 f: a (filename, dictionary) tuple of optional values for a given file
71 f: a (filename, dictionary) tuple of optional values for a given file
72 X: unsupported mandatory record type (used in tests)
72 X: unsupported mandatory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
74 l: the labels for the parts of the merge.
74 l: the labels for the parts of the merge.
75
75
76 Merge driver run states (experimental):
76 Merge driver run states (experimental):
77 u: driver-resolved files unmarked -- needs to be run next time we're about
77 u: driver-resolved files unmarked -- needs to be run next time we're about
78 to resolve or commit
78 to resolve or commit
79 m: driver-resolved files marked -- only needs to be run before commit
79 m: driver-resolved files marked -- only needs to be run before commit
80 s: success/skipped -- does not need to be run any more
80 s: success/skipped -- does not need to be run any more
81
81
82 '''
82 '''
83 statepathv1 = 'merge/state'
83 statepathv1 = 'merge/state'
84 statepathv2 = 'merge/state2'
84 statepathv2 = 'merge/state2'
85
85
86 @staticmethod
86 @staticmethod
87 def clean(repo, node=None, other=None, labels=None):
87 def clean(repo, node=None, other=None, labels=None):
88 """Initialize a brand new merge state, removing any existing state on
88 """Initialize a brand new merge state, removing any existing state on
89 disk."""
89 disk."""
90 ms = mergestate(repo)
90 ms = mergestate(repo)
91 ms.reset(node, other, labels)
91 ms.reset(node, other, labels)
92 return ms
92 return ms
93
93
94 @staticmethod
94 @staticmethod
95 def read(repo):
95 def read(repo):
96 """Initialize the merge state, reading it from disk."""
96 """Initialize the merge state, reading it from disk."""
97 ms = mergestate(repo)
97 ms = mergestate(repo)
98 ms._read()
98 ms._read()
99 return ms
99 return ms
100
100
101 def __init__(self, repo):
101 def __init__(self, repo):
102 """Initialize the merge state.
102 """Initialize the merge state.
103
103
104 Do not use this directly! Instead call read() or clean()."""
104 Do not use this directly! Instead call read() or clean()."""
105 self._repo = repo
105 self._repo = repo
106 self._dirty = False
106 self._dirty = False
107 self._labels = None
107 self._labels = None
108
108
109 def reset(self, node=None, other=None, labels=None):
109 def reset(self, node=None, other=None, labels=None):
110 self._state = {}
110 self._state = {}
111 self._stateextras = {}
111 self._stateextras = {}
112 self._local = None
112 self._local = None
113 self._other = None
113 self._other = None
114 self._labels = labels
114 self._labels = labels
115 for var in ('localctx', 'otherctx'):
115 for var in ('localctx', 'otherctx'):
116 if var in vars(self):
116 if var in vars(self):
117 delattr(self, var)
117 delattr(self, var)
118 if node:
118 if node:
119 self._local = node
119 self._local = node
120 self._other = other
120 self._other = other
121 self._readmergedriver = None
121 self._readmergedriver = None
122 if self.mergedriver:
122 if self.mergedriver:
123 self._mdstate = 's'
123 self._mdstate = 's'
124 else:
124 else:
125 self._mdstate = 'u'
125 self._mdstate = 'u'
126 shutil.rmtree(self._repo.vfs.join('merge'), True)
126 shutil.rmtree(self._repo.vfs.join('merge'), True)
127 self._results = {}
127 self._results = {}
128 self._dirty = False
128 self._dirty = False
129
129
130 def _read(self):
130 def _read(self):
131 """Analyse each record content to restore a serialized state from disk
131 """Analyse each record content to restore a serialized state from disk
132
132
133 This function process "record" entry produced by the de-serialization
133 This function process "record" entry produced by the de-serialization
134 of on disk file.
134 of on disk file.
135 """
135 """
136 self._state = {}
136 self._state = {}
137 self._stateextras = {}
137 self._stateextras = {}
138 self._local = None
138 self._local = None
139 self._other = None
139 self._other = None
140 for var in ('localctx', 'otherctx'):
140 for var in ('localctx', 'otherctx'):
141 if var in vars(self):
141 if var in vars(self):
142 delattr(self, var)
142 delattr(self, var)
143 self._readmergedriver = None
143 self._readmergedriver = None
144 self._mdstate = 's'
144 self._mdstate = 's'
145 unsupported = set()
145 unsupported = set()
146 records = self._readrecords()
146 records = self._readrecords()
147 for rtype, record in records:
147 for rtype, record in records:
148 if rtype == 'L':
148 if rtype == 'L':
149 self._local = bin(record)
149 self._local = bin(record)
150 elif rtype == 'O':
150 elif rtype == 'O':
151 self._other = bin(record)
151 self._other = bin(record)
152 elif rtype == 'm':
152 elif rtype == 'm':
153 bits = record.split('\0', 1)
153 bits = record.split('\0', 1)
154 mdstate = bits[1]
154 mdstate = bits[1]
155 if len(mdstate) != 1 or mdstate not in 'ums':
155 if len(mdstate) != 1 or mdstate not in 'ums':
156 # the merge driver should be idempotent, so just rerun it
156 # the merge driver should be idempotent, so just rerun it
157 mdstate = 'u'
157 mdstate = 'u'
158
158
159 self._readmergedriver = bits[0]
159 self._readmergedriver = bits[0]
160 self._mdstate = mdstate
160 self._mdstate = mdstate
161 elif rtype in 'FDC':
161 elif rtype in 'FDC':
162 bits = record.split('\0')
162 bits = record.split('\0')
163 self._state[bits[0]] = bits[1:]
163 self._state[bits[0]] = bits[1:]
164 elif rtype == 'f':
164 elif rtype == 'f':
165 filename, rawextras = record.split('\0', 1)
165 filename, rawextras = record.split('\0', 1)
166 extraparts = rawextras.split('\0')
166 extraparts = rawextras.split('\0')
167 extras = {}
167 extras = {}
168 i = 0
168 i = 0
169 while i < len(extraparts):
169 while i < len(extraparts):
170 extras[extraparts[i]] = extraparts[i + 1]
170 extras[extraparts[i]] = extraparts[i + 1]
171 i += 2
171 i += 2
172
172
173 self._stateextras[filename] = extras
173 self._stateextras[filename] = extras
174 elif rtype == 'l':
174 elif rtype == 'l':
175 labels = record.split('\0', 2)
175 labels = record.split('\0', 2)
176 self._labels = [l for l in labels if len(l) > 0]
176 self._labels = [l for l in labels if len(l) > 0]
177 elif not rtype.islower():
177 elif not rtype.islower():
178 unsupported.add(rtype)
178 unsupported.add(rtype)
179 self._results = {}
179 self._results = {}
180 self._dirty = False
180 self._dirty = False
181
181
182 if unsupported:
182 if unsupported:
183 raise error.UnsupportedMergeRecords(unsupported)
183 raise error.UnsupportedMergeRecords(unsupported)
184
184
185 def _readrecords(self):
185 def _readrecords(self):
186 """Read merge state from disk and return a list of record (TYPE, data)
186 """Read merge state from disk and return a list of record (TYPE, data)
187
187
188 We read data from both v1 and v2 files and decide which one to use.
188 We read data from both v1 and v2 files and decide which one to use.
189
189
190 V1 has been used by version prior to 2.9.1 and contains less data than
190 V1 has been used by version prior to 2.9.1 and contains less data than
191 v2. We read both versions and check if no data in v2 contradicts
191 v2. We read both versions and check if no data in v2 contradicts
192 v1. If there is not contradiction we can safely assume that both v1
192 v1. If there is not contradiction we can safely assume that both v1
193 and v2 were written at the same time and use the extract data in v2. If
193 and v2 were written at the same time and use the extract data in v2. If
194 there is contradiction we ignore v2 content as we assume an old version
194 there is contradiction we ignore v2 content as we assume an old version
195 of Mercurial has overwritten the mergestate file and left an old v2
195 of Mercurial has overwritten the mergestate file and left an old v2
196 file around.
196 file around.
197
197
198 returns list of record [(TYPE, data), ...]"""
198 returns list of record [(TYPE, data), ...]"""
199 v1records = self._readrecordsv1()
199 v1records = self._readrecordsv1()
200 v2records = self._readrecordsv2()
200 v2records = self._readrecordsv2()
201 if self._v1v2match(v1records, v2records):
201 if self._v1v2match(v1records, v2records):
202 return v2records
202 return v2records
203 else:
203 else:
204 # v1 file is newer than v2 file, use it
204 # v1 file is newer than v2 file, use it
205 # we have to infer the "other" changeset of the merge
205 # we have to infer the "other" changeset of the merge
206 # we cannot do better than that with v1 of the format
206 # we cannot do better than that with v1 of the format
207 mctx = self._repo[None].parents()[-1]
207 mctx = self._repo[None].parents()[-1]
208 v1records.append(('O', mctx.hex()))
208 v1records.append(('O', mctx.hex()))
209 # add place holder "other" file node information
209 # add place holder "other" file node information
210 # nobody is using it yet so we do no need to fetch the data
210 # nobody is using it yet so we do no need to fetch the data
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
212 for idx, r in enumerate(v1records):
212 for idx, r in enumerate(v1records):
213 if r[0] == 'F':
213 if r[0] == 'F':
214 bits = r[1].split('\0')
214 bits = r[1].split('\0')
215 bits.insert(-2, '')
215 bits.insert(-2, '')
216 v1records[idx] = (r[0], '\0'.join(bits))
216 v1records[idx] = (r[0], '\0'.join(bits))
217 return v1records
217 return v1records
218
218
219 def _v1v2match(self, v1records, v2records):
219 def _v1v2match(self, v1records, v2records):
220 oldv2 = set() # old format version of v2 record
220 oldv2 = set() # old format version of v2 record
221 for rec in v2records:
221 for rec in v2records:
222 if rec[0] == 'L':
222 if rec[0] == 'L':
223 oldv2.add(rec)
223 oldv2.add(rec)
224 elif rec[0] == 'F':
224 elif rec[0] == 'F':
225 # drop the onode data (not contained in v1)
225 # drop the onode data (not contained in v1)
226 oldv2.add(('F', _droponode(rec[1])))
226 oldv2.add(('F', _droponode(rec[1])))
227 for rec in v1records:
227 for rec in v1records:
228 if rec not in oldv2:
228 if rec not in oldv2:
229 return False
229 return False
230 else:
230 else:
231 return True
231 return True
232
232
233 def _readrecordsv1(self):
233 def _readrecordsv1(self):
234 """read on disk merge state for version 1 file
234 """read on disk merge state for version 1 file
235
235
236 returns list of record [(TYPE, data), ...]
236 returns list of record [(TYPE, data), ...]
237
237
238 Note: the "F" data from this file are one entry short
238 Note: the "F" data from this file are one entry short
239 (no "other file node" entry)
239 (no "other file node" entry)
240 """
240 """
241 records = []
241 records = []
242 try:
242 try:
243 f = self._repo.vfs(self.statepathv1)
243 f = self._repo.vfs(self.statepathv1)
244 for i, l in enumerate(f):
244 for i, l in enumerate(f):
245 if i == 0:
245 if i == 0:
246 records.append(('L', l[:-1]))
246 records.append(('L', l[:-1]))
247 else:
247 else:
248 records.append(('F', l[:-1]))
248 records.append(('F', l[:-1]))
249 f.close()
249 f.close()
250 except IOError as err:
250 except IOError as err:
251 if err.errno != errno.ENOENT:
251 if err.errno != errno.ENOENT:
252 raise
252 raise
253 return records
253 return records
254
254
255 def _readrecordsv2(self):
255 def _readrecordsv2(self):
256 """read on disk merge state for version 2 file
256 """read on disk merge state for version 2 file
257
257
258 This format is a list of arbitrary records of the form:
258 This format is a list of arbitrary records of the form:
259
259
260 [type][length][content]
260 [type][length][content]
261
261
262 `type` is a single character, `length` is a 4 byte integer, and
262 `type` is a single character, `length` is a 4 byte integer, and
263 `content` is an arbitrary byte sequence of length `length`.
263 `content` is an arbitrary byte sequence of length `length`.
264
264
265 Mercurial versions prior to 3.7 have a bug where if there are
265 Mercurial versions prior to 3.7 have a bug where if there are
266 unsupported mandatory merge records, attempting to clear out the merge
266 unsupported mandatory merge records, attempting to clear out the merge
267 state with hg update --clean or similar aborts. The 't' record type
267 state with hg update --clean or similar aborts. The 't' record type
268 works around that by writing out what those versions treat as an
268 works around that by writing out what those versions treat as an
269 advisory record, but later versions interpret as special: the first
269 advisory record, but later versions interpret as special: the first
270 character is the 'real' record type and everything onwards is the data.
270 character is the 'real' record type and everything onwards is the data.
271
271
272 Returns list of records [(TYPE, data), ...]."""
272 Returns list of records [(TYPE, data), ...]."""
273 records = []
273 records = []
274 try:
274 try:
275 f = self._repo.vfs(self.statepathv2)
275 f = self._repo.vfs(self.statepathv2)
276 data = f.read()
276 data = f.read()
277 off = 0
277 off = 0
278 end = len(data)
278 end = len(data)
279 while off < end:
279 while off < end:
280 rtype = data[off]
280 rtype = data[off]
281 off += 1
281 off += 1
282 length = _unpack('>I', data[off:(off + 4)])[0]
282 length = _unpack('>I', data[off:(off + 4)])[0]
283 off += 4
283 off += 4
284 record = data[off:(off + length)]
284 record = data[off:(off + length)]
285 off += length
285 off += length
286 if rtype == 't':
286 if rtype == 't':
287 rtype, record = record[0], record[1:]
287 rtype, record = record[0], record[1:]
288 records.append((rtype, record))
288 records.append((rtype, record))
289 f.close()
289 f.close()
290 except IOError as err:
290 except IOError as err:
291 if err.errno != errno.ENOENT:
291 if err.errno != errno.ENOENT:
292 raise
292 raise
293 return records
293 return records
294
294
295 @util.propertycache
295 @util.propertycache
296 def mergedriver(self):
296 def mergedriver(self):
297 # protect against the following:
297 # protect against the following:
298 # - A configures a malicious merge driver in their hgrc, then
298 # - A configures a malicious merge driver in their hgrc, then
299 # pauses the merge
299 # pauses the merge
300 # - A edits their hgrc to remove references to the merge driver
300 # - A edits their hgrc to remove references to the merge driver
301 # - A gives a copy of their entire repo, including .hg, to B
301 # - A gives a copy of their entire repo, including .hg, to B
302 # - B inspects .hgrc and finds it to be clean
302 # - B inspects .hgrc and finds it to be clean
303 # - B then continues the merge and the malicious merge driver
303 # - B then continues the merge and the malicious merge driver
304 # gets invoked
304 # gets invoked
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
306 if (self._readmergedriver is not None
306 if (self._readmergedriver is not None
307 and self._readmergedriver != configmergedriver):
307 and self._readmergedriver != configmergedriver):
308 raise error.ConfigError(
308 raise error.ConfigError(
309 _("merge driver changed since merge started"),
309 _("merge driver changed since merge started"),
310 hint=_("revert merge driver change or abort merge"))
310 hint=_("revert merge driver change or abort merge"))
311
311
312 return configmergedriver
312 return configmergedriver
313
313
314 @util.propertycache
314 @util.propertycache
315 def localctx(self):
315 def localctx(self):
316 if self._local is None:
316 if self._local is None:
317 msg = "localctx accessed but self._local isn't set"
317 msg = "localctx accessed but self._local isn't set"
318 raise error.ProgrammingError(msg)
318 raise error.ProgrammingError(msg)
319 return self._repo[self._local]
319 return self._repo[self._local]
320
320
321 @util.propertycache
321 @util.propertycache
322 def otherctx(self):
322 def otherctx(self):
323 if self._other is None:
323 if self._other is None:
324 msg = "otherctx accessed but self._other isn't set"
324 msg = "otherctx accessed but self._other isn't set"
325 raise error.ProgrammingError(msg)
325 raise error.ProgrammingError(msg)
326 return self._repo[self._other]
326 return self._repo[self._other]
327
327
328 def active(self):
328 def active(self):
329 """Whether mergestate is active.
329 """Whether mergestate is active.
330
330
331 Returns True if there appears to be mergestate. This is a rough proxy
331 Returns True if there appears to be mergestate. This is a rough proxy
332 for "is a merge in progress."
332 for "is a merge in progress."
333 """
333 """
334 # Check local variables before looking at filesystem for performance
334 # Check local variables before looking at filesystem for performance
335 # reasons.
335 # reasons.
336 return bool(self._local) or bool(self._state) or \
336 return bool(self._local) or bool(self._state) or \
337 self._repo.vfs.exists(self.statepathv1) or \
337 self._repo.vfs.exists(self.statepathv1) or \
338 self._repo.vfs.exists(self.statepathv2)
338 self._repo.vfs.exists(self.statepathv2)
339
339
340 def commit(self):
340 def commit(self):
341 """Write current state on disk (if necessary)"""
341 """Write current state on disk (if necessary)"""
342 if self._dirty:
342 if self._dirty:
343 records = self._makerecords()
343 records = self._makerecords()
344 self._writerecords(records)
344 self._writerecords(records)
345 self._dirty = False
345 self._dirty = False
346
346
347 def _makerecords(self):
347 def _makerecords(self):
348 records = []
348 records = []
349 records.append(('L', hex(self._local)))
349 records.append(('L', hex(self._local)))
350 records.append(('O', hex(self._other)))
350 records.append(('O', hex(self._other)))
351 if self.mergedriver:
351 if self.mergedriver:
352 records.append(('m', '\0'.join([
352 records.append(('m', '\0'.join([
353 self.mergedriver, self._mdstate])))
353 self.mergedriver, self._mdstate])))
354 for d, v in self._state.iteritems():
354 for d, v in self._state.iteritems():
355 if v[0] == 'd':
355 if v[0] == 'd':
356 records.append(('D', '\0'.join([d] + v)))
356 records.append(('D', '\0'.join([d] + v)))
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
358 # older versions of Mercurial
358 # older versions of Mercurial
359 elif v[1] == nullhex or v[6] == nullhex:
359 elif v[1] == nullhex or v[6] == nullhex:
360 records.append(('C', '\0'.join([d] + v)))
360 records.append(('C', '\0'.join([d] + v)))
361 else:
361 else:
362 records.append(('F', '\0'.join([d] + v)))
362 records.append(('F', '\0'.join([d] + v)))
363 for filename, extras in sorted(self._stateextras.iteritems()):
363 for filename, extras in sorted(self._stateextras.iteritems()):
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
365 extras.iteritems())
365 extras.iteritems())
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
367 if self._labels is not None:
367 if self._labels is not None:
368 labels = '\0'.join(self._labels)
368 labels = '\0'.join(self._labels)
369 records.append(('l', labels))
369 records.append(('l', labels))
370 return records
370 return records
371
371
372 def _writerecords(self, records):
372 def _writerecords(self, records):
373 """Write current state on disk (both v1 and v2)"""
373 """Write current state on disk (both v1 and v2)"""
374 self._writerecordsv1(records)
374 self._writerecordsv1(records)
375 self._writerecordsv2(records)
375 self._writerecordsv2(records)
376
376
377 def _writerecordsv1(self, records):
377 def _writerecordsv1(self, records):
378 """Write current state on disk in a version 1 file"""
378 """Write current state on disk in a version 1 file"""
379 f = self._repo.vfs(self.statepathv1, 'w')
379 f = self._repo.vfs(self.statepathv1, 'w')
380 irecords = iter(records)
380 irecords = iter(records)
381 lrecords = next(irecords)
381 lrecords = next(irecords)
382 assert lrecords[0] == 'L'
382 assert lrecords[0] == 'L'
383 f.write(hex(self._local) + '\n')
383 f.write(hex(self._local) + '\n')
384 for rtype, data in irecords:
384 for rtype, data in irecords:
385 if rtype == 'F':
385 if rtype == 'F':
386 f.write('%s\n' % _droponode(data))
386 f.write('%s\n' % _droponode(data))
387 f.close()
387 f.close()
388
388
389 def _writerecordsv2(self, records):
389 def _writerecordsv2(self, records):
390 """Write current state on disk in a version 2 file
390 """Write current state on disk in a version 2 file
391
391
392 See the docstring for _readrecordsv2 for why we use 't'."""
392 See the docstring for _readrecordsv2 for why we use 't'."""
393 # these are the records that all version 2 clients can read
393 # these are the records that all version 2 clients can read
394 whitelist = 'LOF'
394 whitelist = 'LOF'
395 f = self._repo.vfs(self.statepathv2, 'w')
395 f = self._repo.vfs(self.statepathv2, 'w')
396 for key, data in records:
396 for key, data in records:
397 assert len(key) == 1
397 assert len(key) == 1
398 if key not in whitelist:
398 if key not in whitelist:
399 key, data = 't', '%s%s' % (key, data)
399 key, data = 't', '%s%s' % (key, data)
400 format = '>sI%is' % len(data)
400 format = '>sI%is' % len(data)
401 f.write(_pack(format, key, len(data), data))
401 f.write(_pack(format, key, len(data), data))
402 f.close()
402 f.close()
403
403
404 def add(self, fcl, fco, fca, fd):
404 def add(self, fcl, fco, fca, fd):
405 """add a new (potentially?) conflicting file the merge state
405 """add a new (potentially?) conflicting file the merge state
406 fcl: file context for local,
406 fcl: file context for local,
407 fco: file context for remote,
407 fco: file context for remote,
408 fca: file context for ancestors,
408 fca: file context for ancestors,
409 fd: file path of the resulting merge.
409 fd: file path of the resulting merge.
410
410
411 note: also write the local version to the `.hg/merge` directory.
411 note: also write the local version to the `.hg/merge` directory.
412 """
412 """
413 if fcl.isabsent():
413 if fcl.isabsent():
414 hash = nullhex
414 hash = nullhex
415 else:
415 else:
416 hash = hex(hashlib.sha1(fcl.path()).digest())
416 hash = hex(hashlib.sha1(fcl.path()).digest())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
418 self._state[fd] = ['u', hash, fcl.path(),
418 self._state[fd] = ['u', hash, fcl.path(),
419 fca.path(), hex(fca.filenode()),
419 fca.path(), hex(fca.filenode()),
420 fco.path(), hex(fco.filenode()),
420 fco.path(), hex(fco.filenode()),
421 fcl.flags()]
421 fcl.flags()]
422 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
422 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
423 self._dirty = True
423 self._dirty = True
424
424
425 def __contains__(self, dfile):
425 def __contains__(self, dfile):
426 return dfile in self._state
426 return dfile in self._state
427
427
428 def __getitem__(self, dfile):
428 def __getitem__(self, dfile):
429 return self._state[dfile][0]
429 return self._state[dfile][0]
430
430
431 def __iter__(self):
431 def __iter__(self):
432 return iter(sorted(self._state))
432 return iter(sorted(self._state))
433
433
434 def files(self):
434 def files(self):
435 return self._state.keys()
435 return self._state.keys()
436
436
437 def mark(self, dfile, state):
437 def mark(self, dfile, state):
438 self._state[dfile][0] = state
438 self._state[dfile][0] = state
439 self._dirty = True
439 self._dirty = True
440
440
441 def mdstate(self):
441 def mdstate(self):
442 return self._mdstate
442 return self._mdstate
443
443
444 def unresolved(self):
444 def unresolved(self):
445 """Obtain the paths of unresolved files."""
445 """Obtain the paths of unresolved files."""
446
446
447 for f, entry in self._state.iteritems():
447 for f, entry in self._state.iteritems():
448 if entry[0] == 'u':
448 if entry[0] == 'u':
449 yield f
449 yield f
450
450
451 def driverresolved(self):
451 def driverresolved(self):
452 """Obtain the paths of driver-resolved files."""
452 """Obtain the paths of driver-resolved files."""
453
453
454 for f, entry in self._state.items():
454 for f, entry in self._state.items():
455 if entry[0] == 'd':
455 if entry[0] == 'd':
456 yield f
456 yield f
457
457
458 def extras(self, filename):
458 def extras(self, filename):
459 return self._stateextras.setdefault(filename, {})
459 return self._stateextras.setdefault(filename, {})
460
460
461 def _resolve(self, preresolve, dfile, wctx):
461 def _resolve(self, preresolve, dfile, wctx):
462 """rerun merge process for file path `dfile`"""
462 """rerun merge process for file path `dfile`"""
463 if self[dfile] in 'rd':
463 if self[dfile] in 'rd':
464 return True, 0
464 return True, 0
465 stateentry = self._state[dfile]
465 stateentry = self._state[dfile]
466 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
466 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
467 octx = self._repo[self._other]
467 octx = self._repo[self._other]
468 extras = self.extras(dfile)
468 extras = self.extras(dfile)
469 anccommitnode = extras.get('ancestorlinknode')
469 anccommitnode = extras.get('ancestorlinknode')
470 if anccommitnode:
470 if anccommitnode:
471 actx = self._repo[anccommitnode]
471 actx = self._repo[anccommitnode]
472 else:
472 else:
473 actx = None
473 actx = None
474 fcd = self._filectxorabsent(hash, wctx, dfile)
474 fcd = self._filectxorabsent(hash, wctx, dfile)
475 fco = self._filectxorabsent(onode, octx, ofile)
475 fco = self._filectxorabsent(onode, octx, ofile)
476 # TODO: move this to filectxorabsent
476 # TODO: move this to filectxorabsent
477 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
477 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
478 # "premerge" x flags
478 # "premerge" x flags
479 flo = fco.flags()
479 flo = fco.flags()
480 fla = fca.flags()
480 fla = fca.flags()
481 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
481 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
482 if fca.node() == nullid and flags != flo:
482 if fca.node() == nullid and flags != flo:
483 if preresolve:
483 if preresolve:
484 self._repo.ui.warn(
484 self._repo.ui.warn(
485 _('warning: cannot merge flags for %s '
485 _('warning: cannot merge flags for %s '
486 'without common ancestor - keeping local flags\n')
486 'without common ancestor - keeping local flags\n')
487 % afile)
487 % afile)
488 elif flags == fla:
488 elif flags == fla:
489 flags = flo
489 flags = flo
490 if preresolve:
490 if preresolve:
491 # restore local
491 # restore local
492 if hash != nullhex:
492 if hash != nullhex:
493 f = self._repo.vfs('merge/' + hash)
493 f = self._repo.vfs('merge/' + hash)
494 wctx[dfile].write(f.read(), flags)
494 wctx[dfile].write(f.read(), flags)
495 f.close()
495 f.close()
496 else:
496 else:
497 wctx[dfile].remove(ignoremissing=True)
497 wctx[dfile].remove(ignoremissing=True)
498 complete, r, deleted = filemerge.premerge(self._repo, wctx,
498 complete, r, deleted = filemerge.premerge(self._repo, wctx,
499 self._local, lfile, fcd,
499 self._local, lfile, fcd,
500 fco, fca,
500 fco, fca,
501 labels=self._labels)
501 labels=self._labels)
502 else:
502 else:
503 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
503 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
504 self._local, lfile, fcd,
504 self._local, lfile, fcd,
505 fco, fca,
505 fco, fca,
506 labels=self._labels)
506 labels=self._labels)
507 if r is None:
507 if r is None:
508 # no real conflict
508 # no real conflict
509 del self._state[dfile]
509 del self._state[dfile]
510 self._stateextras.pop(dfile, None)
510 self._stateextras.pop(dfile, None)
511 self._dirty = True
511 self._dirty = True
512 elif not r:
512 elif not r:
513 self.mark(dfile, 'r')
513 self.mark(dfile, 'r')
514
514
515 if complete:
515 if complete:
516 action = None
516 action = None
517 if deleted:
517 if deleted:
518 if fcd.isabsent():
518 if fcd.isabsent():
519 # dc: local picked. Need to drop if present, which may
519 # dc: local picked. Need to drop if present, which may
520 # happen on re-resolves.
520 # happen on re-resolves.
521 action = 'f'
521 action = 'f'
522 else:
522 else:
523 # cd: remote picked (or otherwise deleted)
523 # cd: remote picked (or otherwise deleted)
524 action = 'r'
524 action = 'r'
525 else:
525 else:
526 if fcd.isabsent(): # dc: remote picked
526 if fcd.isabsent(): # dc: remote picked
527 action = 'g'
527 action = 'g'
528 elif fco.isabsent(): # cd: local picked
528 elif fco.isabsent(): # cd: local picked
529 if dfile in self.localctx:
529 if dfile in self.localctx:
530 action = 'am'
530 action = 'am'
531 else:
531 else:
532 action = 'a'
532 action = 'a'
533 # else: regular merges (no action necessary)
533 # else: regular merges (no action necessary)
534 self._results[dfile] = r, action
534 self._results[dfile] = r, action
535
535
536 return complete, r
536 return complete, r
537
537
538 def _filectxorabsent(self, hexnode, ctx, f):
538 def _filectxorabsent(self, hexnode, ctx, f):
539 if hexnode == nullhex:
539 if hexnode == nullhex:
540 return filemerge.absentfilectx(ctx, f)
540 return filemerge.absentfilectx(ctx, f)
541 else:
541 else:
542 return ctx[f]
542 return ctx[f]
543
543
544 def preresolve(self, dfile, wctx):
544 def preresolve(self, dfile, wctx):
545 """run premerge process for dfile
545 """run premerge process for dfile
546
546
547 Returns whether the merge is complete, and the exit code."""
547 Returns whether the merge is complete, and the exit code."""
548 return self._resolve(True, dfile, wctx)
548 return self._resolve(True, dfile, wctx)
549
549
550 def resolve(self, dfile, wctx):
550 def resolve(self, dfile, wctx):
551 """run merge process (assuming premerge was run) for dfile
551 """run merge process (assuming premerge was run) for dfile
552
552
553 Returns the exit code of the merge."""
553 Returns the exit code of the merge."""
554 return self._resolve(False, dfile, wctx)[1]
554 return self._resolve(False, dfile, wctx)[1]
555
555
556 def counts(self):
556 def counts(self):
557 """return counts for updated, merged and removed files in this
557 """return counts for updated, merged and removed files in this
558 session"""
558 session"""
559 updated, merged, removed = 0, 0, 0
559 updated, merged, removed = 0, 0, 0
560 for r, action in self._results.itervalues():
560 for r, action in self._results.itervalues():
561 if r is None:
561 if r is None:
562 updated += 1
562 updated += 1
563 elif r == 0:
563 elif r == 0:
564 if action == 'r':
564 if action == 'r':
565 removed += 1
565 removed += 1
566 else:
566 else:
567 merged += 1
567 merged += 1
568 return updated, merged, removed
568 return updated, merged, removed
569
569
570 def unresolvedcount(self):
570 def unresolvedcount(self):
571 """get unresolved count for this merge (persistent)"""
571 """get unresolved count for this merge (persistent)"""
572 return len(list(self.unresolved()))
572 return len(list(self.unresolved()))
573
573
574 def actions(self):
574 def actions(self):
575 """return lists of actions to perform on the dirstate"""
575 """return lists of actions to perform on the dirstate"""
576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
577 for f, (r, action) in self._results.iteritems():
577 for f, (r, action) in self._results.iteritems():
578 if action is not None:
578 if action is not None:
579 actions[action].append((f, None, "merge result"))
579 actions[action].append((f, None, "merge result"))
580 return actions
580 return actions
581
581
582 def recordactions(self):
582 def recordactions(self):
583 """record remove/add/get actions in the dirstate"""
583 """record remove/add/get actions in the dirstate"""
584 branchmerge = self._repo.dirstate.p2() != nullid
584 branchmerge = self._repo.dirstate.p2() != nullid
585 recordupdates(self._repo, self.actions(), branchmerge)
585 recordupdates(self._repo, self.actions(), branchmerge)
586
586
587 def queueremove(self, f):
587 def queueremove(self, f):
588 """queues a file to be removed from the dirstate
588 """queues a file to be removed from the dirstate
589
589
590 Meant for use by custom merge drivers."""
590 Meant for use by custom merge drivers."""
591 self._results[f] = 0, 'r'
591 self._results[f] = 0, 'r'
592
592
593 def queueadd(self, f):
593 def queueadd(self, f):
594 """queues a file to be added to the dirstate
594 """queues a file to be added to the dirstate
595
595
596 Meant for use by custom merge drivers."""
596 Meant for use by custom merge drivers."""
597 self._results[f] = 0, 'a'
597 self._results[f] = 0, 'a'
598
598
599 def queueget(self, f):
599 def queueget(self, f):
600 """queues a file to be marked modified in the dirstate
600 """queues a file to be marked modified in the dirstate
601
601
602 Meant for use by custom merge drivers."""
602 Meant for use by custom merge drivers."""
603 self._results[f] = 0, 'g'
603 self._results[f] = 0, 'g'
604
604
605 def _getcheckunknownconfig(repo, section, name):
605 def _getcheckunknownconfig(repo, section, name):
606 config = repo.ui.config(section, name, default='abort')
606 config = repo.ui.config(section, name, default='abort')
607 valid = ['abort', 'ignore', 'warn']
607 valid = ['abort', 'ignore', 'warn']
608 if config not in valid:
608 if config not in valid:
609 validstr = ', '.join(["'" + v + "'" for v in valid])
609 validstr = ', '.join(["'" + v + "'" for v in valid])
610 raise error.ConfigError(_("%s.%s not valid "
610 raise error.ConfigError(_("%s.%s not valid "
611 "('%s' is none of %s)")
611 "('%s' is none of %s)")
612 % (section, name, config, validstr))
612 % (section, name, config, validstr))
613 return config
613 return config
614
614
615 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
615 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
616 if f2 is None:
616 if f2 is None:
617 f2 = f
617 f2 = f
618 return (repo.wvfs.audit.check(f)
618 return (repo.wvfs.audit.check(f)
619 and repo.wvfs.isfileorlink(f)
619 and repo.wvfs.isfileorlink(f)
620 and repo.dirstate.normalize(f) not in repo.dirstate
620 and repo.dirstate.normalize(f) not in repo.dirstate
621 and mctx[f2].cmp(wctx[f]))
621 and mctx[f2].cmp(wctx[f]))
622
622
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        # py3: dict.iteritems() no longer exists; items() behaves the same
        # here (only in-place value updates happen below, no key changes)
        for f, (m, args, msg) in actions.items():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.items():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n           *      |      get        n
                #    *         y           y      |     merge       -
                #   abort      y           n      |     merge       -   (1)
                #    warn      y           n      |  warn + get     y
                #  ignore      y           n      |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                else:
                    # NOTE: a separate "elif config == 'abort':
                    # abortconflicts.add(f)" branch used to live here, but it
                    # was unreachable: the branch above already matches
                    # whenever config is 'abort' (behavior (1) in the table).
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # downgrade 'c' (create) actions to plain gets, remembering whether the
    # colliding unknown file must be backed up first
    for f, (m, args, msg) in actions.items():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
702
702
703 def _forgetremoved(wctx, mctx, branchmerge):
703 def _forgetremoved(wctx, mctx, branchmerge):
704 """
704 """
705 Forget removed files
705 Forget removed files
706
706
707 If we're jumping between revisions (as opposed to merging), and if
707 If we're jumping between revisions (as opposed to merging), and if
708 neither the working directory nor the target rev has the file,
708 neither the working directory nor the target rev has the file,
709 then we need to remove it from the dirstate, to prevent the
709 then we need to remove it from the dirstate, to prevent the
710 dirstate from listing the file when it is no longer in the
710 dirstate from listing the file when it is no longer in the
711 manifest.
711 manifest.
712
712
713 If we're merging, and the other revision has removed a file
713 If we're merging, and the other revision has removed a file
714 that is not present in the working directory, we need to mark it
714 that is not present in the working directory, we need to mark it
715 as removed.
715 as removed.
716 """
716 """
717
717
718 actions = {}
718 actions = {}
719 m = 'f'
719 m = 'f'
720 if branchmerge:
720 if branchmerge:
721 m = 'r'
721 m = 'r'
722 for f in wctx.deleted():
722 for f in wctx.deleted():
723 if f not in mctx:
723 if f not in mctx:
724 actions[f] = m, None, "forget deleted"
724 actions[f] = m, None, "forget deleted"
725
725
726 if not branchmerge:
726 if not branchmerge:
727 for f in wctx.removed():
727 for f in wctx.removed():
728 if f not in mctx:
728 if f not in mctx:
729 actions[f] = 'f', None, "forget removed"
729 actions[f] = 'f', None, "forget removed"
730
730
731 return actions
731 return actions
732
732
def _checkcollision(repo, wmf, actions):
    """abort if applying actions to wmf would create case-folding collisions

    Builds the provisional post-merge manifest and checks it for file names
    (and file-vs-directory names) that differ only by case."""
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for typ in ('a', 'am', 'f', 'g', 'cd', 'dc'):
            for f, args, msg in actions[typ]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        prev = foldmap.get(fold)
        if prev is not None:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, prev))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
775
775
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point.
    Always returns True (preprocessing considered complete)."""
    return True
781
781
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point.
    Always returns True (conclusion considered complete)."""
    return True
787
787
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns (actions, diverge, renamedelete) where actions maps each file
    to an (actiontype, args, message) tuple.
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        # py3: dict.iteritems() no longer exists; items() behaves the same
        for copykey, copyvalue in copy.items():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    # py3: iteritems() -> items(); 'diff' is not mutated during iteration
    # (new entries are only added under keys f2/df taken from other maps)
    for f, ((n1, fl1), (n2, fl2)) in diff.items():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
964
964
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 """Resolves false conflicts where the nodeid changed but the content
966 """Resolves false conflicts where the nodeid changed but the content
967 remained the same."""
967 remained the same."""
968
968
969 for f, (m, args, msg) in actions.items():
969 for f, (m, args, msg) in actions.items():
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 # local did change but ended up with same content
971 # local did change but ended up with same content
972 actions[f] = 'r', None, "prompt same"
972 actions[f] = 'r', None, "prompt same"
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 # remote did change but ended up with same content
974 # remote did change but ended up with same content
975 del actions[f] # don't get = keep local deleted
975 del actions[f] # don't get = keep local deleted
976
976
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
978 acceptremote, followcopies, matcher=None,
978 acceptremote, followcopies, matcher=None,
979 mergeforce=False):
979 mergeforce=False):
980 """Calculate the actions needed to merge mctx into wctx using ancestors"""
980 """Calculate the actions needed to merge mctx into wctx using ancestors"""
981 # Avoid cycle.
981 # Avoid cycle.
982 from . import sparse
982 from . import sparse
983
983
984 if len(ancestors) == 1: # default
984 if len(ancestors) == 1: # default
985 actions, diverge, renamedelete = manifestmerge(
985 actions, diverge, renamedelete = manifestmerge(
986 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
986 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
987 acceptremote, followcopies)
987 acceptremote, followcopies)
988 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
988 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
989
989
990 else: # only when merge.preferancestor=* - the default
990 else: # only when merge.preferancestor=* - the default
991 repo.ui.note(
991 repo.ui.note(
992 _("note: merging %s and %s using bids from ancestors %s\n") %
992 _("note: merging %s and %s using bids from ancestors %s\n") %
993 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
993 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
994
994
995 # Call for bids
995 # Call for bids
996 fbids = {} # mapping filename to bids (action method to list af actions)
996 fbids = {} # mapping filename to bids (action method to list af actions)
997 diverge, renamedelete = None, None
997 diverge, renamedelete = None, None
998 for ancestor in ancestors:
998 for ancestor in ancestors:
999 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
999 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1000 actions, diverge1, renamedelete1 = manifestmerge(
1000 actions, diverge1, renamedelete1 = manifestmerge(
1001 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1001 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1002 acceptremote, followcopies, forcefulldiff=True)
1002 acceptremote, followcopies, forcefulldiff=True)
1003 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1003 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1004
1004
1005 # Track the shortest set of warning on the theory that bid
1005 # Track the shortest set of warning on the theory that bid
1006 # merge will correctly incorporate more information
1006 # merge will correctly incorporate more information
1007 if diverge is None or len(diverge1) < len(diverge):
1007 if diverge is None or len(diverge1) < len(diverge):
1008 diverge = diverge1
1008 diverge = diverge1
1009 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1009 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1010 renamedelete = renamedelete1
1010 renamedelete = renamedelete1
1011
1011
1012 for f, a in sorted(actions.iteritems()):
1012 for f, a in sorted(actions.iteritems()):
1013 m, args, msg = a
1013 m, args, msg = a
1014 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1014 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1015 if f in fbids:
1015 if f in fbids:
1016 d = fbids[f]
1016 d = fbids[f]
1017 if m in d:
1017 if m in d:
1018 d[m].append(a)
1018 d[m].append(a)
1019 else:
1019 else:
1020 d[m] = [a]
1020 d[m] = [a]
1021 else:
1021 else:
1022 fbids[f] = {m: [a]}
1022 fbids[f] = {m: [a]}
1023
1023
1024 # Pick the best bid for each file
1024 # Pick the best bid for each file
1025 repo.ui.note(_('\nauction for merging merge bids\n'))
1025 repo.ui.note(_('\nauction for merging merge bids\n'))
1026 actions = {}
1026 actions = {}
1027 dms = [] # filenames that have dm actions
1027 dms = [] # filenames that have dm actions
1028 for f, bids in sorted(fbids.items()):
1028 for f, bids in sorted(fbids.items()):
1029 # bids is a mapping from action method to list af actions
1029 # bids is a mapping from action method to list af actions
1030 # Consensus?
1030 # Consensus?
1031 if len(bids) == 1: # all bids are the same kind of method
1031 if len(bids) == 1: # all bids are the same kind of method
1032 m, l = bids.items()[0]
1032 m, l = list(bids.items())[0]
1033 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1033 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1034 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1034 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1035 actions[f] = l[0]
1035 actions[f] = l[0]
1036 if m == 'dm':
1036 if m == 'dm':
1037 dms.append(f)
1037 dms.append(f)
1038 continue
1038 continue
1039 # If keep is an option, just do it.
1039 # If keep is an option, just do it.
1040 if 'k' in bids:
1040 if 'k' in bids:
1041 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1041 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1042 actions[f] = bids['k'][0]
1042 actions[f] = bids['k'][0]
1043 continue
1043 continue
1044 # If there are gets and they all agree [how could they not?], do it.
1044 # If there are gets and they all agree [how could they not?], do it.
1045 if 'g' in bids:
1045 if 'g' in bids:
1046 ga0 = bids['g'][0]
1046 ga0 = bids['g'][0]
1047 if all(a == ga0 for a in bids['g'][1:]):
1047 if all(a == ga0 for a in bids['g'][1:]):
1048 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1048 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1049 actions[f] = ga0
1049 actions[f] = ga0
1050 continue
1050 continue
1051 # TODO: Consider other simple actions such as mode changes
1051 # TODO: Consider other simple actions such as mode changes
1052 # Handle inefficient democrazy.
1052 # Handle inefficient democrazy.
1053 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1053 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1054 for m, l in sorted(bids.items()):
1054 for m, l in sorted(bids.items()):
1055 for _f, args, msg in l:
1055 for _f, args, msg in l:
1056 repo.ui.note(' %s -> %s\n' % (msg, m))
1056 repo.ui.note(' %s -> %s\n' % (msg, m))
1057 # Pick random action. TODO: Instead, prompt user when resolving
1057 # Pick random action. TODO: Instead, prompt user when resolving
1058 m, l = bids.items()[0]
1058 m, l = list(bids.items())[0]
1059 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1059 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1060 (f, m))
1060 (f, m))
1061 actions[f] = l[0]
1061 actions[f] = l[0]
1062 if m == 'dm':
1062 if m == 'dm':
1063 dms.append(f)
1063 dms.append(f)
1064 continue
1064 continue
1065 # Work around 'dm' that can cause multiple actions for the same file
1065 # Work around 'dm' that can cause multiple actions for the same file
1066 for f in dms:
1066 for f in dms:
1067 dm, (f0, flags), msg = actions[f]
1067 dm, (f0, flags), msg = actions[f]
1068 assert dm == 'dm', dm
1068 assert dm == 'dm', dm
1069 if f0 in actions and actions[f0][0] == 'r':
1069 if f0 in actions and actions[f0][0] == 'r':
1070 # We have one bid for removing a file and another for moving it.
1070 # We have one bid for removing a file and another for moving it.
1071 # These two could be merged as first move and then delete ...
1071 # These two could be merged as first move and then delete ...
1072 # but instead drop moving and just delete.
1072 # but instead drop moving and just delete.
1073 del actions[f]
1073 del actions[f]
1074 repo.ui.note(_('end of auction\n\n'))
1074 repo.ui.note(_('end of auction\n\n'))
1075
1075
1076 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1076 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1077
1077
1078 if wctx.rev() is None:
1078 if wctx.rev() is None:
1079 fractions = _forgetremoved(wctx, mctx, branchmerge)
1079 fractions = _forgetremoved(wctx, mctx, branchmerge)
1080 actions.update(fractions)
1080 actions.update(fractions)
1081
1081
1082 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1082 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1083 actions)
1083 actions)
1084
1084
1085 return prunedactions, diverge, renamedelete
1085 return prunedactions, diverge, renamedelete
1086
1086
1087 def _getcwd():
1087 def _getcwd():
1088 try:
1088 try:
1089 return pycompat.getcwd()
1089 return pycompat.getcwd()
1090 except OSError as err:
1090 except OSError as err:
1091 if err.errno == errno.ENOENT:
1091 if err.errno == errno.ENOENT:
1092 return None
1092 return None
1093 raise
1093 raise
1094
1094
1095 def batchremove(repo, wctx, actions):
1095 def batchremove(repo, wctx, actions):
1096 """apply removes to the working directory
1096 """apply removes to the working directory
1097
1097
1098 yields tuples for progress updates
1098 yields tuples for progress updates
1099 """
1099 """
1100 verbose = repo.ui.verbose
1100 verbose = repo.ui.verbose
1101 cwd = _getcwd()
1101 cwd = _getcwd()
1102 i = 0
1102 i = 0
1103 for f, args, msg in actions:
1103 for f, args, msg in actions:
1104 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1104 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1105 if verbose:
1105 if verbose:
1106 repo.ui.note(_("removing %s\n") % f)
1106 repo.ui.note(_("removing %s\n") % f)
1107 wctx[f].audit()
1107 wctx[f].audit()
1108 try:
1108 try:
1109 wctx[f].remove(ignoremissing=True)
1109 wctx[f].remove(ignoremissing=True)
1110 except OSError as inst:
1110 except OSError as inst:
1111 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1111 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1112 (f, inst.strerror))
1112 (f, inst.strerror))
1113 if i == 100:
1113 if i == 100:
1114 yield i, f
1114 yield i, f
1115 i = 0
1115 i = 0
1116 i += 1
1116 i += 1
1117 if i > 0:
1117 if i > 0:
1118 yield i, f
1118 yield i, f
1119
1119
1120 if cwd and not _getcwd():
1120 if cwd and not _getcwd():
1121 # cwd was removed in the course of removing files; print a helpful
1121 # cwd was removed in the course of removing files; print a helpful
1122 # warning.
1122 # warning.
1123 repo.ui.warn(_("current directory was removed\n"
1123 repo.ui.warn(_("current directory was removed\n"
1124 "(consider changing to repo root: %s)\n") % repo.root)
1124 "(consider changing to repo root: %s)\n") % repo.root)
1125
1125
1126 # It's necessary to flush here in case we're inside a worker fork and will
1126 # It's necessary to flush here in case we're inside a worker fork and will
1127 # quit after this function.
1127 # quit after this function.
1128 wctx.flushall()
1128 wctx.flushall()
1129
1129
1130 def batchget(repo, mctx, wctx, actions):
1130 def batchget(repo, mctx, wctx, actions):
1131 """apply gets to the working directory
1131 """apply gets to the working directory
1132
1132
1133 mctx is the context to get from
1133 mctx is the context to get from
1134
1134
1135 yields tuples for progress updates
1135 yields tuples for progress updates
1136 """
1136 """
1137 verbose = repo.ui.verbose
1137 verbose = repo.ui.verbose
1138 fctx = mctx.filectx
1138 fctx = mctx.filectx
1139 ui = repo.ui
1139 ui = repo.ui
1140 i = 0
1140 i = 0
1141 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1141 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1142 for f, (flags, backup), msg in actions:
1142 for f, (flags, backup), msg in actions:
1143 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1143 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1144 if verbose:
1144 if verbose:
1145 repo.ui.note(_("getting %s\n") % f)
1145 repo.ui.note(_("getting %s\n") % f)
1146
1146
1147 if backup:
1147 if backup:
1148 absf = repo.wjoin(f)
1148 absf = repo.wjoin(f)
1149 orig = scmutil.origpath(ui, repo, absf)
1149 orig = scmutil.origpath(ui, repo, absf)
1150 try:
1150 try:
1151 if repo.wvfs.isfileorlink(f):
1151 if repo.wvfs.isfileorlink(f):
1152 util.rename(absf, orig)
1152 util.rename(absf, orig)
1153 except OSError as e:
1153 except OSError as e:
1154 if e.errno != errno.ENOENT:
1154 if e.errno != errno.ENOENT:
1155 raise
1155 raise
1156 wctx[f].clearunknown()
1156 wctx[f].clearunknown()
1157 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1157 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1158 if i == 100:
1158 if i == 100:
1159 yield i, f
1159 yield i, f
1160 i = 0
1160 i = 0
1161 i += 1
1161 i += 1
1162 if i > 0:
1162 if i > 0:
1163 yield i, f
1163 yield i, f
1164
1164
1165 # It's necessary to flush here in case we're inside a worker fork and will
1165 # It's necessary to flush here in case we're inside a worker fork and will
1166 # quit after this function.
1166 # quit after this function.
1167 wctx.flushall()
1167 wctx.flushall()
1168
1168
1169 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1169 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1170 """apply the merge action list to the working directory
1170 """apply the merge action list to the working directory
1171
1171
1172 wctx is the working copy context
1172 wctx is the working copy context
1173 mctx is the context to be merged into the working copy
1173 mctx is the context to be merged into the working copy
1174
1174
1175 Return a tuple of counts (updated, merged, removed, unresolved) that
1175 Return a tuple of counts (updated, merged, removed, unresolved) that
1176 describes how many files were affected by the update.
1176 describes how many files were affected by the update.
1177 """
1177 """
1178
1178
1179 updated, merged, removed = 0, 0, 0
1179 updated, merged, removed = 0, 0, 0
1180 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1180 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1181 moves = []
1181 moves = []
1182 for m, l in actions.items():
1182 for m, l in actions.items():
1183 l.sort()
1183 l.sort()
1184
1184
1185 # 'cd' and 'dc' actions are treated like other merge conflicts
1185 # 'cd' and 'dc' actions are treated like other merge conflicts
1186 mergeactions = sorted(actions['cd'])
1186 mergeactions = sorted(actions['cd'])
1187 mergeactions.extend(sorted(actions['dc']))
1187 mergeactions.extend(sorted(actions['dc']))
1188 mergeactions.extend(actions['m'])
1188 mergeactions.extend(actions['m'])
1189 for f, args, msg in mergeactions:
1189 for f, args, msg in mergeactions:
1190 f1, f2, fa, move, anc = args
1190 f1, f2, fa, move, anc = args
1191 if f == '.hgsubstate': # merged internally
1191 if f == '.hgsubstate': # merged internally
1192 continue
1192 continue
1193 if f1 is None:
1193 if f1 is None:
1194 fcl = filemerge.absentfilectx(wctx, fa)
1194 fcl = filemerge.absentfilectx(wctx, fa)
1195 else:
1195 else:
1196 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1196 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1197 fcl = wctx[f1]
1197 fcl = wctx[f1]
1198 if f2 is None:
1198 if f2 is None:
1199 fco = filemerge.absentfilectx(mctx, fa)
1199 fco = filemerge.absentfilectx(mctx, fa)
1200 else:
1200 else:
1201 fco = mctx[f2]
1201 fco = mctx[f2]
1202 actx = repo[anc]
1202 actx = repo[anc]
1203 if fa in actx:
1203 if fa in actx:
1204 fca = actx[fa]
1204 fca = actx[fa]
1205 else:
1205 else:
1206 # TODO: move to absentfilectx
1206 # TODO: move to absentfilectx
1207 fca = repo.filectx(f1, fileid=nullrev)
1207 fca = repo.filectx(f1, fileid=nullrev)
1208 ms.add(fcl, fco, fca, f)
1208 ms.add(fcl, fco, fca, f)
1209 if f1 != f and move:
1209 if f1 != f and move:
1210 moves.append(f1)
1210 moves.append(f1)
1211
1211
1212 _updating = _('updating')
1212 _updating = _('updating')
1213 _files = _('files')
1213 _files = _('files')
1214 progress = repo.ui.progress
1214 progress = repo.ui.progress
1215
1215
1216 # remove renamed files after safely stored
1216 # remove renamed files after safely stored
1217 for f in moves:
1217 for f in moves:
1218 if wctx[f].lexists():
1218 if wctx[f].lexists():
1219 repo.ui.debug("removing %s\n" % f)
1219 repo.ui.debug("removing %s\n" % f)
1220 wctx[f].audit()
1220 wctx[f].audit()
1221 wctx[f].remove()
1221 wctx[f].remove()
1222
1222
1223 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1223 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1224
1224
1225 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1225 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1226 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1226 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1227
1227
1228 # remove in parallel (must come first)
1228 # remove in parallel (must come first)
1229 z = 0
1229 z = 0
1230 prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
1230 prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
1231 actions['r'])
1231 actions['r'])
1232 for i, item in prog:
1232 for i, item in prog:
1233 z += i
1233 z += i
1234 progress(_updating, z, item=item, total=numupdates, unit=_files)
1234 progress(_updating, z, item=item, total=numupdates, unit=_files)
1235 removed = len(actions['r'])
1235 removed = len(actions['r'])
1236
1236
1237 # We should flush before forking into worker processes, since those workers
1237 # We should flush before forking into worker processes, since those workers
1238 # flush when they complete, and we don't want to duplicate work.
1238 # flush when they complete, and we don't want to duplicate work.
1239 wctx.flushall()
1239 wctx.flushall()
1240
1240
1241 # get in parallel
1241 # get in parallel
1242 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
1242 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
1243 actions['g'])
1243 actions['g'])
1244 for i, item in prog:
1244 for i, item in prog:
1245 z += i
1245 z += i
1246 progress(_updating, z, item=item, total=numupdates, unit=_files)
1246 progress(_updating, z, item=item, total=numupdates, unit=_files)
1247 updated = len(actions['g'])
1247 updated = len(actions['g'])
1248
1248
1249 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1249 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1250 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1250 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1251
1251
1252 # forget (manifest only, just log it) (must come first)
1252 # forget (manifest only, just log it) (must come first)
1253 for f, args, msg in actions['f']:
1253 for f, args, msg in actions['f']:
1254 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1254 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1255 z += 1
1255 z += 1
1256 progress(_updating, z, item=f, total=numupdates, unit=_files)
1256 progress(_updating, z, item=f, total=numupdates, unit=_files)
1257
1257
1258 # re-add (manifest only, just log it)
1258 # re-add (manifest only, just log it)
1259 for f, args, msg in actions['a']:
1259 for f, args, msg in actions['a']:
1260 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1260 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1261 z += 1
1261 z += 1
1262 progress(_updating, z, item=f, total=numupdates, unit=_files)
1262 progress(_updating, z, item=f, total=numupdates, unit=_files)
1263
1263
1264 # re-add/mark as modified (manifest only, just log it)
1264 # re-add/mark as modified (manifest only, just log it)
1265 for f, args, msg in actions['am']:
1265 for f, args, msg in actions['am']:
1266 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1266 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1267 z += 1
1267 z += 1
1268 progress(_updating, z, item=f, total=numupdates, unit=_files)
1268 progress(_updating, z, item=f, total=numupdates, unit=_files)
1269
1269
1270 # keep (noop, just log it)
1270 # keep (noop, just log it)
1271 for f, args, msg in actions['k']:
1271 for f, args, msg in actions['k']:
1272 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1272 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1273 # no progress
1273 # no progress
1274
1274
1275 # directory rename, move local
1275 # directory rename, move local
1276 for f, args, msg in actions['dm']:
1276 for f, args, msg in actions['dm']:
1277 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1277 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1278 z += 1
1278 z += 1
1279 progress(_updating, z, item=f, total=numupdates, unit=_files)
1279 progress(_updating, z, item=f, total=numupdates, unit=_files)
1280 f0, flags = args
1280 f0, flags = args
1281 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1281 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1282 wctx[f].audit()
1282 wctx[f].audit()
1283 wctx[f].write(wctx.filectx(f0).data(), flags)
1283 wctx[f].write(wctx.filectx(f0).data(), flags)
1284 wctx[f0].remove()
1284 wctx[f0].remove()
1285 updated += 1
1285 updated += 1
1286
1286
1287 # local directory rename, get
1287 # local directory rename, get
1288 for f, args, msg in actions['dg']:
1288 for f, args, msg in actions['dg']:
1289 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1289 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1290 z += 1
1290 z += 1
1291 progress(_updating, z, item=f, total=numupdates, unit=_files)
1291 progress(_updating, z, item=f, total=numupdates, unit=_files)
1292 f0, flags = args
1292 f0, flags = args
1293 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1293 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1294 wctx[f].write(mctx.filectx(f0).data(), flags)
1294 wctx[f].write(mctx.filectx(f0).data(), flags)
1295 updated += 1
1295 updated += 1
1296
1296
1297 # exec
1297 # exec
1298 for f, args, msg in actions['e']:
1298 for f, args, msg in actions['e']:
1299 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1299 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1300 z += 1
1300 z += 1
1301 progress(_updating, z, item=f, total=numupdates, unit=_files)
1301 progress(_updating, z, item=f, total=numupdates, unit=_files)
1302 flags, = args
1302 flags, = args
1303 wctx[f].audit()
1303 wctx[f].audit()
1304 wctx[f].setflags('l' in flags, 'x' in flags)
1304 wctx[f].setflags('l' in flags, 'x' in flags)
1305 updated += 1
1305 updated += 1
1306
1306
1307 # the ordering is important here -- ms.mergedriver will raise if the merge
1307 # the ordering is important here -- ms.mergedriver will raise if the merge
1308 # driver has changed, and we want to be able to bypass it when overwrite is
1308 # driver has changed, and we want to be able to bypass it when overwrite is
1309 # True
1309 # True
1310 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1310 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1311
1311
1312 if usemergedriver:
1312 if usemergedriver:
1313 ms.commit()
1313 ms.commit()
1314 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1314 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1315 # the driver might leave some files unresolved
1315 # the driver might leave some files unresolved
1316 unresolvedf = set(ms.unresolved())
1316 unresolvedf = set(ms.unresolved())
1317 if not proceed:
1317 if not proceed:
1318 # XXX setting unresolved to at least 1 is a hack to make sure we
1318 # XXX setting unresolved to at least 1 is a hack to make sure we
1319 # error out
1319 # error out
1320 return updated, merged, removed, max(len(unresolvedf), 1)
1320 return updated, merged, removed, max(len(unresolvedf), 1)
1321 newactions = []
1321 newactions = []
1322 for f, args, msg in mergeactions:
1322 for f, args, msg in mergeactions:
1323 if f in unresolvedf:
1323 if f in unresolvedf:
1324 newactions.append((f, args, msg))
1324 newactions.append((f, args, msg))
1325 mergeactions = newactions
1325 mergeactions = newactions
1326
1326
1327 # premerge
1327 # premerge
1328 tocomplete = []
1328 tocomplete = []
1329 for f, args, msg in mergeactions:
1329 for f, args, msg in mergeactions:
1330 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1330 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1331 z += 1
1331 z += 1
1332 progress(_updating, z, item=f, total=numupdates, unit=_files)
1332 progress(_updating, z, item=f, total=numupdates, unit=_files)
1333 if f == '.hgsubstate': # subrepo states need updating
1333 if f == '.hgsubstate': # subrepo states need updating
1334 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1334 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1335 overwrite, labels)
1335 overwrite, labels)
1336 continue
1336 continue
1337 wctx[f].audit()
1337 wctx[f].audit()
1338 complete, r = ms.preresolve(f, wctx)
1338 complete, r = ms.preresolve(f, wctx)
1339 if not complete:
1339 if not complete:
1340 numupdates += 1
1340 numupdates += 1
1341 tocomplete.append((f, args, msg))
1341 tocomplete.append((f, args, msg))
1342
1342
1343 # merge
1343 # merge
1344 for f, args, msg in tocomplete:
1344 for f, args, msg in tocomplete:
1345 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1345 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1346 z += 1
1346 z += 1
1347 progress(_updating, z, item=f, total=numupdates, unit=_files)
1347 progress(_updating, z, item=f, total=numupdates, unit=_files)
1348 ms.resolve(f, wctx)
1348 ms.resolve(f, wctx)
1349
1349
1350 ms.commit()
1350 ms.commit()
1351
1351
1352 unresolved = ms.unresolvedcount()
1352 unresolved = ms.unresolvedcount()
1353
1353
1354 if usemergedriver and not unresolved and ms.mdstate() != 's':
1354 if usemergedriver and not unresolved and ms.mdstate() != 's':
1355 if not driverconclude(repo, ms, wctx, labels=labels):
1355 if not driverconclude(repo, ms, wctx, labels=labels):
1356 # XXX setting unresolved to at least 1 is a hack to make sure we
1356 # XXX setting unresolved to at least 1 is a hack to make sure we
1357 # error out
1357 # error out
1358 unresolved = max(unresolved, 1)
1358 unresolved = max(unresolved, 1)
1359
1359
1360 ms.commit()
1360 ms.commit()
1361
1361
1362 msupdated, msmerged, msremoved = ms.counts()
1362 msupdated, msmerged, msremoved = ms.counts()
1363 updated += msupdated
1363 updated += msupdated
1364 merged += msmerged
1364 merged += msmerged
1365 removed += msremoved
1365 removed += msremoved
1366
1366
1367 extraactions = ms.actions()
1367 extraactions = ms.actions()
1368 if extraactions:
1368 if extraactions:
1369 mfiles = set(a[0] for a in actions['m'])
1369 mfiles = set(a[0] for a in actions['m'])
1370 for k, acts in extraactions.iteritems():
1370 for k, acts in extraactions.iteritems():
1371 actions[k].extend(acts)
1371 actions[k].extend(acts)
1372 # Remove these files from actions['m'] as well. This is important
1372 # Remove these files from actions['m'] as well. This is important
1373 # because in recordupdates, files in actions['m'] are processed
1373 # because in recordupdates, files in actions['m'] are processed
1374 # after files in other actions, and the merge driver might add
1374 # after files in other actions, and the merge driver might add
1375 # files to those actions via extraactions above. This can lead to a
1375 # files to those actions via extraactions above. This can lead to a
1376 # file being recorded twice, with poor results. This is especially
1376 # file being recorded twice, with poor results. This is especially
1377 # problematic for actions['r'] (currently only possible with the
1377 # problematic for actions['r'] (currently only possible with the
1378 # merge driver in the initial merge process; interrupted merges
1378 # merge driver in the initial merge process; interrupted merges
1379 # don't go through this flow).
1379 # don't go through this flow).
1380 #
1380 #
1381 # The real fix here is to have indexes by both file and action so
1381 # The real fix here is to have indexes by both file and action so
1382 # that when the action for a file is changed it is automatically
1382 # that when the action for a file is changed it is automatically
1383 # reflected in the other action lists. But that involves a more
1383 # reflected in the other action lists. But that involves a more
1384 # complex data structure, so this will do for now.
1384 # complex data structure, so this will do for now.
1385 #
1385 #
1386 # We don't need to do the same operation for 'dc' and 'cd' because
1386 # We don't need to do the same operation for 'dc' and 'cd' because
1387 # those lists aren't consulted again.
1387 # those lists aren't consulted again.
1388 mfiles.difference_update(a[0] for a in acts)
1388 mfiles.difference_update(a[0] for a in acts)
1389
1389
1390 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1390 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1391
1391
1392 progress(_updating, None, total=numupdates, unit=_files)
1392 progress(_updating, None, total=numupdates, unit=_files)
1393
1393
1394 return updated, merged, removed, unresolved
1394 return updated, merged, removed, unresolved
1395
1395
1396 def recordupdates(repo, actions, branchmerge):
1396 def recordupdates(repo, actions, branchmerge):
1397 "record merge actions to the dirstate"
1397 "record merge actions to the dirstate"
1398 # remove (must come first)
1398 # remove (must come first)
1399 for f, args, msg in actions.get('r', []):
1399 for f, args, msg in actions.get('r', []):
1400 if branchmerge:
1400 if branchmerge:
1401 repo.dirstate.remove(f)
1401 repo.dirstate.remove(f)
1402 else:
1402 else:
1403 repo.dirstate.drop(f)
1403 repo.dirstate.drop(f)
1404
1404
1405 # forget (must come first)
1405 # forget (must come first)
1406 for f, args, msg in actions.get('f', []):
1406 for f, args, msg in actions.get('f', []):
1407 repo.dirstate.drop(f)
1407 repo.dirstate.drop(f)
1408
1408
1409 # re-add
1409 # re-add
1410 for f, args, msg in actions.get('a', []):
1410 for f, args, msg in actions.get('a', []):
1411 repo.dirstate.add(f)
1411 repo.dirstate.add(f)
1412
1412
1413 # re-add/mark as modified
1413 # re-add/mark as modified
1414 for f, args, msg in actions.get('am', []):
1414 for f, args, msg in actions.get('am', []):
1415 if branchmerge:
1415 if branchmerge:
1416 repo.dirstate.normallookup(f)
1416 repo.dirstate.normallookup(f)
1417 else:
1417 else:
1418 repo.dirstate.add(f)
1418 repo.dirstate.add(f)
1419
1419
1420 # exec change
1420 # exec change
1421 for f, args, msg in actions.get('e', []):
1421 for f, args, msg in actions.get('e', []):
1422 repo.dirstate.normallookup(f)
1422 repo.dirstate.normallookup(f)
1423
1423
1424 # keep
1424 # keep
1425 for f, args, msg in actions.get('k', []):
1425 for f, args, msg in actions.get('k', []):
1426 pass
1426 pass
1427
1427
1428 # get
1428 # get
1429 for f, args, msg in actions.get('g', []):
1429 for f, args, msg in actions.get('g', []):
1430 if branchmerge:
1430 if branchmerge:
1431 repo.dirstate.otherparent(f)
1431 repo.dirstate.otherparent(f)
1432 else:
1432 else:
1433 repo.dirstate.normal(f)
1433 repo.dirstate.normal(f)
1434
1434
1435 # merge
1435 # merge
1436 for f, args, msg in actions.get('m', []):
1436 for f, args, msg in actions.get('m', []):
1437 f1, f2, fa, move, anc = args
1437 f1, f2, fa, move, anc = args
1438 if branchmerge:
1438 if branchmerge:
1439 # We've done a branch merge, mark this file as merged
1439 # We've done a branch merge, mark this file as merged
1440 # so that we properly record the merger later
1440 # so that we properly record the merger later
1441 repo.dirstate.merge(f)
1441 repo.dirstate.merge(f)
1442 if f1 != f2: # copy/rename
1442 if f1 != f2: # copy/rename
1443 if move:
1443 if move:
1444 repo.dirstate.remove(f1)
1444 repo.dirstate.remove(f1)
1445 if f1 != f:
1445 if f1 != f:
1446 repo.dirstate.copy(f1, f)
1446 repo.dirstate.copy(f1, f)
1447 else:
1447 else:
1448 repo.dirstate.copy(f2, f)
1448 repo.dirstate.copy(f2, f)
1449 else:
1449 else:
1450 # We've update-merged a locally modified file, so
1450 # We've update-merged a locally modified file, so
1451 # we set the dirstate to emulate a normal checkout
1451 # we set the dirstate to emulate a normal checkout
1452 # of that file some time in the past. Thus our
1452 # of that file some time in the past. Thus our
1453 # merge will appear as a normal local file
1453 # merge will appear as a normal local file
1454 # modification.
1454 # modification.
1455 if f2 == f: # file not locally copied/moved
1455 if f2 == f: # file not locally copied/moved
1456 repo.dirstate.normallookup(f)
1456 repo.dirstate.normallookup(f)
1457 if move:
1457 if move:
1458 repo.dirstate.drop(f1)
1458 repo.dirstate.drop(f1)
1459
1459
1460 # directory rename, move local
1460 # directory rename, move local
1461 for f, args, msg in actions.get('dm', []):
1461 for f, args, msg in actions.get('dm', []):
1462 f0, flag = args
1462 f0, flag = args
1463 if branchmerge:
1463 if branchmerge:
1464 repo.dirstate.add(f)
1464 repo.dirstate.add(f)
1465 repo.dirstate.remove(f0)
1465 repo.dirstate.remove(f0)
1466 repo.dirstate.copy(f0, f)
1466 repo.dirstate.copy(f0, f)
1467 else:
1467 else:
1468 repo.dirstate.normal(f)
1468 repo.dirstate.normal(f)
1469 repo.dirstate.drop(f0)
1469 repo.dirstate.drop(f0)
1470
1470
1471 # directory rename, get
1471 # directory rename, get
1472 for f, args, msg in actions.get('dg', []):
1472 for f, args, msg in actions.get('dg', []):
1473 f0, flag = args
1473 f0, flag = args
1474 if branchmerge:
1474 if branchmerge:
1475 repo.dirstate.add(f)
1475 repo.dirstate.add(f)
1476 repo.dirstate.copy(f0, f)
1476 repo.dirstate.copy(f0, f)
1477 else:
1477 else:
1478 repo.dirstate.normal(f)
1478 repo.dirstate.normal(f)
1479
1479
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first. The
    -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *    |    (1)
     y   *   y   *    *     *     *    |    (1)
     y   *   *   y    *     *     *    |    (1)
     *   y   y   *    *     *     *    |    (1)
     *   y   *   y    *     *     *    |    (1)
     *   *   y   y    *     *     *    |    (1)
     *   *   *   *    *     n     n    |     x
     *   *   *   *    n     *     *    |    ok
     n   n   n   n    y     *     y    |   merge
     n   n   n   n    y     y     n    |    (2)
     n   n   n   y    y     *     *    |   merge
     n   n   y   n    y     *     *    |   merge if no conflict
     n   y   n   n    y     *     *    |   discard
     y   n   n   n    y     *     *    |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like objects. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better suppport some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        # pas: candidate merge ancestors; [None] means "compute below"
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        # binary nodes and printable ids of the two parents, for
        # setparents and the pre/post-update hooks
        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            # NOTE(review): iteritems() is Python 2 only; a py3 port would
            # switch these loops to items() -- confirm against the tree's
            # porting conventions
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                # changed locally, deleted on the other side
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                # deleted locally, changed on the other side
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        # stats[3] is the count of unresolved files from applyupdates()
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1736
1736
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    isdescendant = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    mergestats = update(repo, ctx.node(), True, True, pctx.node(),
                        mergeancestor=isdescendant, labels=labels)

    # Pick the second dirstate parent: normally null, but when keepparent is
    # requested and pctx really is one of ctx's two parents, keep the other.
    secondparent = nullid
    ctxparents = ctx.parents()
    if keepparent and len(ctxparents) == 2 and pctx in ctxparents:
        ctxparents.remove(pctx)
        secondparent = ctxparents[0].node()

    with repo.dirstate.parentchange():
        # note: '.' is re-read here because update() moved the working parent
        repo.setparents(repo['.'].node(), secondparent)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return mergestats
General Comments 0
You need to be logged in to leave comments. Login now