##// END OF EJS Templates
destutil: remove duplicate check and leave it to merge.update()...
Martin von Zweigbergk -
r30961:330fbd51 default
parent child Browse files
Show More
@@ -1,433 +1,407 b''
1 # destutil.py - Mercurial utility function for command destination
1 # destutil.py - Mercurial utility function for command destination
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com> and other
3 # Copyright Matt Mackall <mpm@selenic.com> and other
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 bookmarks,
12 bookmarks,
13 error,
13 error,
14 obsolete,
14 obsolete,
15 )
15 )
16
16
17 def _destupdatevalidate(repo, rev, clean, check):
18 """validate that the destination comply to various rules
19
20 This exists as its own function to help wrapping from extensions."""
21 wc = repo[None]
22 p1 = wc.p1()
23 if not clean:
24 # Check that the update is linear.
25 #
26 # Mercurial do not allow update-merge for non linear pattern
27 # (that would be technically possible but was considered too confusing
28 # for user a long time ago)
29 #
30 # See mercurial.merge.update for details
31 if p1.rev() not in repo.changelog.ancestors([rev], inclusive=True):
32 dirty = wc.dirty(missing=True)
33 foreground = obsolete.foreground(repo, [p1.node()])
34 if not repo[rev].node() in foreground:
35 if dirty:
36 msg = _("uncommitted changes")
37 hint = _("commit and merge, or update --clean to"
38 " discard changes")
39 raise error.UpdateAbort(msg, hint=hint)
40
41 def _destupdateobs(repo, clean, check):
17 def _destupdateobs(repo, clean, check):
42 """decide of an update destination from obsolescence markers"""
18 """decide of an update destination from obsolescence markers"""
43 node = None
19 node = None
44 wc = repo[None]
20 wc = repo[None]
45 p1 = wc.p1()
21 p1 = wc.p1()
46 movemark = None
22 movemark = None
47
23
48 if p1.obsolete() and not p1.children():
24 if p1.obsolete() and not p1.children():
49 # allow updating to successors
25 # allow updating to successors
50 successors = obsolete.successorssets(repo, p1.node())
26 successors = obsolete.successorssets(repo, p1.node())
51
27
52 # behavior of certain cases is as follows,
28 # behavior of certain cases is as follows,
53 #
29 #
54 # divergent changesets: update to highest rev, similar to what
30 # divergent changesets: update to highest rev, similar to what
55 # is currently done when there are more than one head
31 # is currently done when there are more than one head
56 # (i.e. 'tip')
32 # (i.e. 'tip')
57 #
33 #
58 # replaced changesets: same as divergent except we know there
34 # replaced changesets: same as divergent except we know there
59 # is no conflict
35 # is no conflict
60 #
36 #
61 # pruned changeset: no update is done; though, we could
37 # pruned changeset: no update is done; though, we could
62 # consider updating to the first non-obsolete parent,
38 # consider updating to the first non-obsolete parent,
63 # similar to what is current done for 'hg prune'
39 # similar to what is current done for 'hg prune'
64
40
65 if successors:
41 if successors:
66 # flatten the list here handles both divergent (len > 1)
42 # flatten the list here handles both divergent (len > 1)
67 # and the usual case (len = 1)
43 # and the usual case (len = 1)
68 successors = [n for sub in successors for n in sub]
44 successors = [n for sub in successors for n in sub]
69
45
70 # get the max revision for the given successors set,
46 # get the max revision for the given successors set,
71 # i.e. the 'tip' of a set
47 # i.e. the 'tip' of a set
72 node = repo.revs('max(%ln)', successors).first()
48 node = repo.revs('max(%ln)', successors).first()
73 if bookmarks.isactivewdirparent(repo):
49 if bookmarks.isactivewdirparent(repo):
74 movemark = repo['.'].node()
50 movemark = repo['.'].node()
75 return node, movemark, None
51 return node, movemark, None
76
52
77 def _destupdatebook(repo, clean, check):
53 def _destupdatebook(repo, clean, check):
78 """decide on an update destination from active bookmark"""
54 """decide on an update destination from active bookmark"""
79 # we also move the active bookmark, if any
55 # we also move the active bookmark, if any
80 activemark = None
56 activemark = None
81 node, movemark = bookmarks.calculateupdate(repo.ui, repo, None)
57 node, movemark = bookmarks.calculateupdate(repo.ui, repo, None)
82 if node is not None:
58 if node is not None:
83 activemark = node
59 activemark = node
84 return node, movemark, activemark
60 return node, movemark, activemark
85
61
86 def _destupdatebranch(repo, clean, check):
62 def _destupdatebranch(repo, clean, check):
87 """decide on an update destination from current branch
63 """decide on an update destination from current branch
88
64
89 This ignores closed branch heads.
65 This ignores closed branch heads.
90 """
66 """
91 wc = repo[None]
67 wc = repo[None]
92 movemark = node = None
68 movemark = node = None
93 currentbranch = wc.branch()
69 currentbranch = wc.branch()
94
70
95 if clean:
71 if clean:
96 currentbranch = repo['.'].branch()
72 currentbranch = repo['.'].branch()
97
73
98 if currentbranch in repo.branchmap():
74 if currentbranch in repo.branchmap():
99 heads = repo.branchheads(currentbranch)
75 heads = repo.branchheads(currentbranch)
100 if heads:
76 if heads:
101 node = repo.revs('max(.::(%ln))', heads).first()
77 node = repo.revs('max(.::(%ln))', heads).first()
102 if bookmarks.isactivewdirparent(repo):
78 if bookmarks.isactivewdirparent(repo):
103 movemark = repo['.'].node()
79 movemark = repo['.'].node()
104 elif currentbranch == 'default' and not wc.p1():
80 elif currentbranch == 'default' and not wc.p1():
105 # "null" parent belongs to "default" branch, but it doesn't exist, so
81 # "null" parent belongs to "default" branch, but it doesn't exist, so
106 # update to the tipmost non-closed branch head
82 # update to the tipmost non-closed branch head
107 node = repo.revs('max(head() and not closed())').first()
83 node = repo.revs('max(head() and not closed())').first()
108 else:
84 else:
109 node = repo['.'].node()
85 node = repo['.'].node()
110 return node, movemark, None
86 return node, movemark, None
111
87
112 def _destupdatebranchfallback(repo, clean, check):
88 def _destupdatebranchfallback(repo, clean, check):
113 """decide on an update destination from closed heads in current branch"""
89 """decide on an update destination from closed heads in current branch"""
114 wc = repo[None]
90 wc = repo[None]
115 currentbranch = wc.branch()
91 currentbranch = wc.branch()
116 movemark = None
92 movemark = None
117 if currentbranch in repo.branchmap():
93 if currentbranch in repo.branchmap():
118 # here, all descendant branch heads are closed
94 # here, all descendant branch heads are closed
119 heads = repo.branchheads(currentbranch, closed=True)
95 heads = repo.branchheads(currentbranch, closed=True)
120 assert heads, "any branch has at least one head"
96 assert heads, "any branch has at least one head"
121 node = repo.revs('max(.::(%ln))', heads).first()
97 node = repo.revs('max(.::(%ln))', heads).first()
122 assert node is not None, ("any revision has at least "
98 assert node is not None, ("any revision has at least "
123 "one descendant branch head")
99 "one descendant branch head")
124 if bookmarks.isactivewdirparent(repo):
100 if bookmarks.isactivewdirparent(repo):
125 movemark = repo['.'].node()
101 movemark = repo['.'].node()
126 else:
102 else:
127 # here, no "default" branch, and all branches are closed
103 # here, no "default" branch, and all branches are closed
128 node = repo.lookup('tip')
104 node = repo.lookup('tip')
129 assert node is not None, "'tip' exists even in empty repository"
105 assert node is not None, "'tip' exists even in empty repository"
130 return node, movemark, None
106 return node, movemark, None
131
107
132 # order in which each step should be evaluated
108 # order in which each step should be evaluated
133 # steps are run until one finds a destination
109 # steps are run until one finds a destination
134 destupdatesteps = ['evolution', 'bookmark', 'branch', 'branchfallback']
110 destupdatesteps = ['evolution', 'bookmark', 'branch', 'branchfallback']
135 # mapping to ease extension overriding steps.
111 # mapping to ease extension overriding steps.
136 destupdatestepmap = {'evolution': _destupdateobs,
112 destupdatestepmap = {'evolution': _destupdateobs,
137 'bookmark': _destupdatebook,
113 'bookmark': _destupdatebook,
138 'branch': _destupdatebranch,
114 'branch': _destupdatebranch,
139 'branchfallback': _destupdatebranchfallback,
115 'branchfallback': _destupdatebranchfallback,
140 }
116 }
141
117
142 def destupdate(repo, clean=False, check=False):
118 def destupdate(repo, clean=False, check=False):
143 """destination for bare update operation
119 """destination for bare update operation
144
120
145 return (rev, movemark, activemark)
121 return (rev, movemark, activemark)
146
122
147 - rev: the revision to update to,
123 - rev: the revision to update to,
148 - movemark: node to move the active bookmark from
124 - movemark: node to move the active bookmark from
149 (cf bookmark.calculate update),
125 (cf bookmark.calculate update),
150 - activemark: a bookmark to activate at the end of the update.
126 - activemark: a bookmark to activate at the end of the update.
151 """
127 """
152 node = movemark = activemark = None
128 node = movemark = activemark = None
153
129
154 for step in destupdatesteps:
130 for step in destupdatesteps:
155 node, movemark, activemark = destupdatestepmap[step](repo, clean, check)
131 node, movemark, activemark = destupdatestepmap[step](repo, clean, check)
156 if node is not None:
132 if node is not None:
157 break
133 break
158 rev = repo[node].rev()
134 rev = repo[node].rev()
159
135
160 _destupdatevalidate(repo, rev, clean, check)
161
162 return rev, movemark, activemark
136 return rev, movemark, activemark
163
137
164 msgdestmerge = {
138 msgdestmerge = {
165 # too many matching divergent bookmark
139 # too many matching divergent bookmark
166 'toomanybookmarks':
140 'toomanybookmarks':
167 {'merge':
141 {'merge':
168 (_("multiple matching bookmarks to merge -"
142 (_("multiple matching bookmarks to merge -"
169 " please merge with an explicit rev or bookmark"),
143 " please merge with an explicit rev or bookmark"),
170 _("run 'hg heads' to see all heads")),
144 _("run 'hg heads' to see all heads")),
171 'rebase':
145 'rebase':
172 (_("multiple matching bookmarks to rebase -"
146 (_("multiple matching bookmarks to rebase -"
173 " please rebase to an explicit rev or bookmark"),
147 " please rebase to an explicit rev or bookmark"),
174 _("run 'hg heads' to see all heads")),
148 _("run 'hg heads' to see all heads")),
175 },
149 },
176 # no other matching divergent bookmark
150 # no other matching divergent bookmark
177 'nootherbookmarks':
151 'nootherbookmarks':
178 {'merge':
152 {'merge':
179 (_("no matching bookmark to merge - "
153 (_("no matching bookmark to merge - "
180 "please merge with an explicit rev or bookmark"),
154 "please merge with an explicit rev or bookmark"),
181 _("run 'hg heads' to see all heads")),
155 _("run 'hg heads' to see all heads")),
182 'rebase':
156 'rebase':
183 (_("no matching bookmark to rebase - "
157 (_("no matching bookmark to rebase - "
184 "please rebase to an explicit rev or bookmark"),
158 "please rebase to an explicit rev or bookmark"),
185 _("run 'hg heads' to see all heads")),
159 _("run 'hg heads' to see all heads")),
186 },
160 },
187 # branch have too many unbookmarked heads, no obvious destination
161 # branch have too many unbookmarked heads, no obvious destination
188 'toomanyheads':
162 'toomanyheads':
189 {'merge':
163 {'merge':
190 (_("branch '%s' has %d heads - please merge with an explicit rev"),
164 (_("branch '%s' has %d heads - please merge with an explicit rev"),
191 _("run 'hg heads .' to see heads")),
165 _("run 'hg heads .' to see heads")),
192 'rebase':
166 'rebase':
193 (_("branch '%s' has %d heads - please rebase to an explicit rev"),
167 (_("branch '%s' has %d heads - please rebase to an explicit rev"),
194 _("run 'hg heads .' to see heads")),
168 _("run 'hg heads .' to see heads")),
195 },
169 },
196 # branch have no other unbookmarked heads
170 # branch have no other unbookmarked heads
197 'bookmarkedheads':
171 'bookmarkedheads':
198 {'merge':
172 {'merge':
199 (_("heads are bookmarked - please merge with an explicit rev"),
173 (_("heads are bookmarked - please merge with an explicit rev"),
200 _("run 'hg heads' to see all heads")),
174 _("run 'hg heads' to see all heads")),
201 'rebase':
175 'rebase':
202 (_("heads are bookmarked - please rebase to an explicit rev"),
176 (_("heads are bookmarked - please rebase to an explicit rev"),
203 _("run 'hg heads' to see all heads")),
177 _("run 'hg heads' to see all heads")),
204 },
178 },
205 # branch have just a single heads, but there is other branches
179 # branch have just a single heads, but there is other branches
206 'nootherbranchheads':
180 'nootherbranchheads':
207 {'merge':
181 {'merge':
208 (_("branch '%s' has one head - please merge with an explicit rev"),
182 (_("branch '%s' has one head - please merge with an explicit rev"),
209 _("run 'hg heads' to see all heads")),
183 _("run 'hg heads' to see all heads")),
210 'rebase':
184 'rebase':
211 (_("branch '%s' has one head - please rebase to an explicit rev"),
185 (_("branch '%s' has one head - please rebase to an explicit rev"),
212 _("run 'hg heads' to see all heads")),
186 _("run 'hg heads' to see all heads")),
213 },
187 },
214 # repository have a single head
188 # repository have a single head
215 'nootherheads':
189 'nootherheads':
216 {'merge':
190 {'merge':
217 (_('nothing to merge'),
191 (_('nothing to merge'),
218 None),
192 None),
219 'rebase':
193 'rebase':
220 (_('nothing to rebase'),
194 (_('nothing to rebase'),
221 None),
195 None),
222 },
196 },
223 # repository have a single head and we are not on it
197 # repository have a single head and we are not on it
224 'nootherheadsbehind':
198 'nootherheadsbehind':
225 {'merge':
199 {'merge':
226 (_('nothing to merge'),
200 (_('nothing to merge'),
227 _("use 'hg update' instead")),
201 _("use 'hg update' instead")),
228 'rebase':
202 'rebase':
229 (_('nothing to rebase'),
203 (_('nothing to rebase'),
230 _("use 'hg update' instead")),
204 _("use 'hg update' instead")),
231 },
205 },
232 # We are not on a head
206 # We are not on a head
233 'notatheads':
207 'notatheads':
234 {'merge':
208 {'merge':
235 (_('working directory not at a head revision'),
209 (_('working directory not at a head revision'),
236 _("use 'hg update' or merge with an explicit revision")),
210 _("use 'hg update' or merge with an explicit revision")),
237 'rebase':
211 'rebase':
238 (_('working directory not at a head revision'),
212 (_('working directory not at a head revision'),
239 _("use 'hg update' or rebase to an explicit revision"))
213 _("use 'hg update' or rebase to an explicit revision"))
240 },
214 },
241 'emptysourceset':
215 'emptysourceset':
242 {'merge':
216 {'merge':
243 (_('source set is empty'),
217 (_('source set is empty'),
244 None),
218 None),
245 'rebase':
219 'rebase':
246 (_('source set is empty'),
220 (_('source set is empty'),
247 None),
221 None),
248 },
222 },
249 'multiplebranchessourceset':
223 'multiplebranchessourceset':
250 {'merge':
224 {'merge':
251 (_('source set is rooted in multiple branches'),
225 (_('source set is rooted in multiple branches'),
252 None),
226 None),
253 'rebase':
227 'rebase':
254 (_('rebaseset is rooted in multiple named branches'),
228 (_('rebaseset is rooted in multiple named branches'),
255 _('specify an explicit destination with --dest')),
229 _('specify an explicit destination with --dest')),
256 },
230 },
257 }
231 }
258
232
259 def _destmergebook(repo, action='merge', sourceset=None, destspace=None):
233 def _destmergebook(repo, action='merge', sourceset=None, destspace=None):
260 """find merge destination in the active bookmark case"""
234 """find merge destination in the active bookmark case"""
261 node = None
235 node = None
262 bmheads = repo.bookmarkheads(repo._activebookmark)
236 bmheads = repo.bookmarkheads(repo._activebookmark)
263 curhead = repo[repo._activebookmark].node()
237 curhead = repo[repo._activebookmark].node()
264 if len(bmheads) == 2:
238 if len(bmheads) == 2:
265 if curhead == bmheads[0]:
239 if curhead == bmheads[0]:
266 node = bmheads[1]
240 node = bmheads[1]
267 else:
241 else:
268 node = bmheads[0]
242 node = bmheads[0]
269 elif len(bmheads) > 2:
243 elif len(bmheads) > 2:
270 msg, hint = msgdestmerge['toomanybookmarks'][action]
244 msg, hint = msgdestmerge['toomanybookmarks'][action]
271 raise error.ManyMergeDestAbort(msg, hint=hint)
245 raise error.ManyMergeDestAbort(msg, hint=hint)
272 elif len(bmheads) <= 1:
246 elif len(bmheads) <= 1:
273 msg, hint = msgdestmerge['nootherbookmarks'][action]
247 msg, hint = msgdestmerge['nootherbookmarks'][action]
274 raise error.NoMergeDestAbort(msg, hint=hint)
248 raise error.NoMergeDestAbort(msg, hint=hint)
275 assert node is not None
249 assert node is not None
276 return node
250 return node
277
251
278 def _destmergebranch(repo, action='merge', sourceset=None, onheadcheck=True,
252 def _destmergebranch(repo, action='merge', sourceset=None, onheadcheck=True,
279 destspace=None):
253 destspace=None):
280 """find merge destination based on branch heads"""
254 """find merge destination based on branch heads"""
281 node = None
255 node = None
282
256
283 if sourceset is None:
257 if sourceset is None:
284 sourceset = [repo[repo.dirstate.p1()].rev()]
258 sourceset = [repo[repo.dirstate.p1()].rev()]
285 branch = repo.dirstate.branch()
259 branch = repo.dirstate.branch()
286 elif not sourceset:
260 elif not sourceset:
287 msg, hint = msgdestmerge['emptysourceset'][action]
261 msg, hint = msgdestmerge['emptysourceset'][action]
288 raise error.NoMergeDestAbort(msg, hint=hint)
262 raise error.NoMergeDestAbort(msg, hint=hint)
289 else:
263 else:
290 branch = None
264 branch = None
291 for ctx in repo.set('roots(%ld::%ld)', sourceset, sourceset):
265 for ctx in repo.set('roots(%ld::%ld)', sourceset, sourceset):
292 if branch is not None and ctx.branch() != branch:
266 if branch is not None and ctx.branch() != branch:
293 msg, hint = msgdestmerge['multiplebranchessourceset'][action]
267 msg, hint = msgdestmerge['multiplebranchessourceset'][action]
294 raise error.ManyMergeDestAbort(msg, hint=hint)
268 raise error.ManyMergeDestAbort(msg, hint=hint)
295 branch = ctx.branch()
269 branch = ctx.branch()
296
270
297 bheads = repo.branchheads(branch)
271 bheads = repo.branchheads(branch)
298 onhead = repo.revs('%ld and %ln', sourceset, bheads)
272 onhead = repo.revs('%ld and %ln', sourceset, bheads)
299 if onheadcheck and not onhead:
273 if onheadcheck and not onhead:
300 # Case A: working copy if not on a head. (merge only)
274 # Case A: working copy if not on a head. (merge only)
301 #
275 #
302 # This is probably a user mistake We bailout pointing at 'hg update'
276 # This is probably a user mistake We bailout pointing at 'hg update'
303 if len(repo.heads()) <= 1:
277 if len(repo.heads()) <= 1:
304 msg, hint = msgdestmerge['nootherheadsbehind'][action]
278 msg, hint = msgdestmerge['nootherheadsbehind'][action]
305 else:
279 else:
306 msg, hint = msgdestmerge['notatheads'][action]
280 msg, hint = msgdestmerge['notatheads'][action]
307 raise error.Abort(msg, hint=hint)
281 raise error.Abort(msg, hint=hint)
308 # remove heads descendants of source from the set
282 # remove heads descendants of source from the set
309 bheads = list(repo.revs('%ln - (%ld::)', bheads, sourceset))
283 bheads = list(repo.revs('%ln - (%ld::)', bheads, sourceset))
310 # filters out bookmarked heads
284 # filters out bookmarked heads
311 nbhs = list(repo.revs('%ld - bookmark()', bheads))
285 nbhs = list(repo.revs('%ld - bookmark()', bheads))
312
286
313 if destspace is not None:
287 if destspace is not None:
314 # restrict search space
288 # restrict search space
315 # used in the 'hg pull --rebase' case, see issue 5214.
289 # used in the 'hg pull --rebase' case, see issue 5214.
316 nbhs = list(repo.revs('%ld and %ld', destspace, nbhs))
290 nbhs = list(repo.revs('%ld and %ld', destspace, nbhs))
317
291
318 if len(nbhs) > 1:
292 if len(nbhs) > 1:
319 # Case B: There is more than 1 other anonymous heads
293 # Case B: There is more than 1 other anonymous heads
320 #
294 #
321 # This means that there will be more than 1 candidate. This is
295 # This means that there will be more than 1 candidate. This is
322 # ambiguous. We abort asking the user to pick as explicit destination
296 # ambiguous. We abort asking the user to pick as explicit destination
323 # instead.
297 # instead.
324 msg, hint = msgdestmerge['toomanyheads'][action]
298 msg, hint = msgdestmerge['toomanyheads'][action]
325 msg %= (branch, len(bheads) + 1)
299 msg %= (branch, len(bheads) + 1)
326 raise error.ManyMergeDestAbort(msg, hint=hint)
300 raise error.ManyMergeDestAbort(msg, hint=hint)
327 elif not nbhs:
301 elif not nbhs:
328 # Case B: There is no other anonymous heads
302 # Case B: There is no other anonymous heads
329 #
303 #
330 # This means that there is no natural candidate to merge with.
304 # This means that there is no natural candidate to merge with.
331 # We abort, with various messages for various cases.
305 # We abort, with various messages for various cases.
332 if bheads:
306 if bheads:
333 msg, hint = msgdestmerge['bookmarkedheads'][action]
307 msg, hint = msgdestmerge['bookmarkedheads'][action]
334 elif len(repo.heads()) > 1:
308 elif len(repo.heads()) > 1:
335 msg, hint = msgdestmerge['nootherbranchheads'][action]
309 msg, hint = msgdestmerge['nootherbranchheads'][action]
336 msg %= branch
310 msg %= branch
337 elif not onhead:
311 elif not onhead:
338 # if 'onheadcheck == False' (rebase case),
312 # if 'onheadcheck == False' (rebase case),
339 # this was not caught in Case A.
313 # this was not caught in Case A.
340 msg, hint = msgdestmerge['nootherheadsbehind'][action]
314 msg, hint = msgdestmerge['nootherheadsbehind'][action]
341 else:
315 else:
342 msg, hint = msgdestmerge['nootherheads'][action]
316 msg, hint = msgdestmerge['nootherheads'][action]
343 raise error.NoMergeDestAbort(msg, hint=hint)
317 raise error.NoMergeDestAbort(msg, hint=hint)
344 else:
318 else:
345 node = nbhs[0]
319 node = nbhs[0]
346 assert node is not None
320 assert node is not None
347 return node
321 return node
348
322
349 def destmerge(repo, action='merge', sourceset=None, onheadcheck=True,
323 def destmerge(repo, action='merge', sourceset=None, onheadcheck=True,
350 destspace=None):
324 destspace=None):
351 """return the default destination for a merge
325 """return the default destination for a merge
352
326
353 (or raise exception about why it can't pick one)
327 (or raise exception about why it can't pick one)
354
328
355 :action: the action being performed, controls emitted error message
329 :action: the action being performed, controls emitted error message
356 """
330 """
357 # destspace is here to work around issues with `hg pull --rebase` see
331 # destspace is here to work around issues with `hg pull --rebase` see
358 # issue5214 for details
332 # issue5214 for details
359 if repo._activebookmark:
333 if repo._activebookmark:
360 node = _destmergebook(repo, action=action, sourceset=sourceset,
334 node = _destmergebook(repo, action=action, sourceset=sourceset,
361 destspace=destspace)
335 destspace=destspace)
362 else:
336 else:
363 node = _destmergebranch(repo, action=action, sourceset=sourceset,
337 node = _destmergebranch(repo, action=action, sourceset=sourceset,
364 onheadcheck=onheadcheck, destspace=destspace)
338 onheadcheck=onheadcheck, destspace=destspace)
365 return repo[node].rev()
339 return repo[node].rev()
366
340
367 histeditdefaultrevset = 'reverse(only(.) and not public() and not ::merge())'
341 histeditdefaultrevset = 'reverse(only(.) and not public() and not ::merge())'
368
342
369 def desthistedit(ui, repo):
343 def desthistedit(ui, repo):
370 """Default base revision to edit for `hg histedit`."""
344 """Default base revision to edit for `hg histedit`."""
371 # Avoid cycle: scmutil -> revset -> destutil
345 # Avoid cycle: scmutil -> revset -> destutil
372 from . import scmutil
346 from . import scmutil
373
347
374 default = ui.config('histedit', 'defaultrev', histeditdefaultrevset)
348 default = ui.config('histedit', 'defaultrev', histeditdefaultrevset)
375 if default:
349 if default:
376 revs = scmutil.revrange(repo, [default])
350 revs = scmutil.revrange(repo, [default])
377 if revs:
351 if revs:
378 # The revset supplied by the user may not be in ascending order nor
352 # The revset supplied by the user may not be in ascending order nor
379 # take the first revision. So do this manually.
353 # take the first revision. So do this manually.
380 revs.sort()
354 revs.sort()
381 return revs.first()
355 return revs.first()
382
356
383 return None
357 return None
384
358
385 def _statusotherbook(ui, repo):
359 def _statusotherbook(ui, repo):
386 bmheads = repo.bookmarkheads(repo._activebookmark)
360 bmheads = repo.bookmarkheads(repo._activebookmark)
387 curhead = repo[repo._activebookmark].node()
361 curhead = repo[repo._activebookmark].node()
388 if repo.revs('%n and parents()', curhead):
362 if repo.revs('%n and parents()', curhead):
389 # we are on the active bookmark
363 # we are on the active bookmark
390 bmheads = [b for b in bmheads if curhead != b]
364 bmheads = [b for b in bmheads if curhead != b]
391 if bmheads:
365 if bmheads:
392 msg = _('%i other divergent bookmarks for "%s"\n')
366 msg = _('%i other divergent bookmarks for "%s"\n')
393 ui.status(msg % (len(bmheads), repo._activebookmark))
367 ui.status(msg % (len(bmheads), repo._activebookmark))
394
368
395 def _statusotherbranchheads(ui, repo):
369 def _statusotherbranchheads(ui, repo):
396 currentbranch = repo.dirstate.branch()
370 currentbranch = repo.dirstate.branch()
397 allheads = repo.branchheads(currentbranch, closed=True)
371 allheads = repo.branchheads(currentbranch, closed=True)
398 heads = repo.branchheads(currentbranch)
372 heads = repo.branchheads(currentbranch)
399 if repo.revs('%ln and parents()', allheads):
373 if repo.revs('%ln and parents()', allheads):
400 # we are on a head, even though it might be closed
374 # we are on a head, even though it might be closed
401 #
375 #
402 # on closed otherheads
376 # on closed otherheads
403 # ========= ==========
377 # ========= ==========
404 # o 0 all heads for current branch are closed
378 # o 0 all heads for current branch are closed
405 # N only descendant branch heads are closed
379 # N only descendant branch heads are closed
406 # x 0 there is only one non-closed branch head
380 # x 0 there is only one non-closed branch head
407 # N there are some non-closed branch heads
381 # N there are some non-closed branch heads
408 # ========= ==========
382 # ========= ==========
409 otherheads = repo.revs('%ln - parents()', heads)
383 otherheads = repo.revs('%ln - parents()', heads)
410 if repo['.'].closesbranch():
384 if repo['.'].closesbranch():
411 ui.warn(_('no open descendant heads on branch "%s", '
385 ui.warn(_('no open descendant heads on branch "%s", '
412 'updating to a closed head\n') %
386 'updating to a closed head\n') %
413 (currentbranch))
387 (currentbranch))
414 if otherheads:
388 if otherheads:
415 ui.warn(_("(committing will reopen the head, "
389 ui.warn(_("(committing will reopen the head, "
416 "use 'hg heads .' to see %i other heads)\n") %
390 "use 'hg heads .' to see %i other heads)\n") %
417 (len(otherheads)))
391 (len(otherheads)))
418 else:
392 else:
419 ui.warn(_('(committing will reopen branch "%s")\n') %
393 ui.warn(_('(committing will reopen branch "%s")\n') %
420 (currentbranch))
394 (currentbranch))
421 elif otherheads:
395 elif otherheads:
422 ui.status(_('%i other heads for branch "%s"\n') %
396 ui.status(_('%i other heads for branch "%s"\n') %
423 (len(otherheads), currentbranch))
397 (len(otherheads), currentbranch))
424
398
425 def statusotherdests(ui, repo):
399 def statusotherdests(ui, repo):
426 """Print message about other head"""
400 """Print message about other head"""
427 # XXX we should probably include a hint:
401 # XXX we should probably include a hint:
428 # - about what to do
402 # - about what to do
429 # - how to see such heads
403 # - how to see such heads
430 if repo._activebookmark:
404 if repo._activebookmark:
431 _statusotherbook(ui, repo)
405 _statusotherbook(ui, repo)
432 else:
406 else:
433 _statusotherbranchheads(ui, repo)
407 _statusotherbranchheads(ui, repo)
@@ -1,1710 +1,1710 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import shutil
13 import shutil
14 import struct
14 import struct
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullhex,
22 nullhex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 )
25 )
26 from . import (
26 from . import (
27 copies,
27 copies,
28 error,
28 error,
29 filemerge,
29 filemerge,
30 obsolete,
30 obsolete,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepo,
33 subrepo,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41 def _droponode(data):
41 def _droponode(data):
42 # used for compatibility for v1
42 # used for compatibility for v1
43 bits = data.split('\0')
43 bits = data.split('\0')
44 bits = bits[:-2] + bits[-1:]
44 bits = bits[:-2] + bits[-1:]
45 return '\0'.join(bits)
45 return '\0'.join(bits)
46
46
47 class mergestate(object):
47 class mergestate(object):
48 '''track 3-way merge state of individual files
48 '''track 3-way merge state of individual files
49
49
50 The merge state is stored on disk when needed. Two files are used: one with
50 The merge state is stored on disk when needed. Two files are used: one with
51 an old format (version 1), and one with a new format (version 2). Version 2
51 an old format (version 1), and one with a new format (version 2). Version 2
52 stores a superset of the data in version 1, including new kinds of records
52 stores a superset of the data in version 1, including new kinds of records
53 in the future. For more about the new format, see the documentation for
53 in the future. For more about the new format, see the documentation for
54 `_readrecordsv2`.
54 `_readrecordsv2`.
55
55
56 Each record can contain arbitrary content, and has an associated type. This
56 Each record can contain arbitrary content, and has an associated type. This
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 versions of Mercurial that don't support it should abort. If `type` is
58 versions of Mercurial that don't support it should abort. If `type` is
59 lowercase, the record can be safely ignored.
59 lowercase, the record can be safely ignored.
60
60
61 Currently known records:
61 Currently known records:
62
62
63 L: the node of the "local" part of the merge (hexified version)
63 L: the node of the "local" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
65 F: a file to be merged entry
65 F: a file to be merged entry
66 C: a change/delete or delete/change conflict
66 C: a change/delete or delete/change conflict
67 D: a file that the external merge driver will merge internally
67 D: a file that the external merge driver will merge internally
68 (experimental)
68 (experimental)
69 m: the external merge driver defined for this merge plus its run state
69 m: the external merge driver defined for this merge plus its run state
70 (experimental)
70 (experimental)
71 f: a (filename, dictionary) tuple of optional values for a given file
71 f: a (filename, dictionary) tuple of optional values for a given file
72 X: unsupported mandatory record type (used in tests)
72 X: unsupported mandatory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
74 l: the labels for the parts of the merge.
74 l: the labels for the parts of the merge.
75
75
76 Merge driver run states (experimental):
76 Merge driver run states (experimental):
77 u: driver-resolved files unmarked -- needs to be run next time we're about
77 u: driver-resolved files unmarked -- needs to be run next time we're about
78 to resolve or commit
78 to resolve or commit
79 m: driver-resolved files marked -- only needs to be run before commit
79 m: driver-resolved files marked -- only needs to be run before commit
80 s: success/skipped -- does not need to be run any more
80 s: success/skipped -- does not need to be run any more
81
81
82 '''
82 '''
83 statepathv1 = 'merge/state'
83 statepathv1 = 'merge/state'
84 statepathv2 = 'merge/state2'
84 statepathv2 = 'merge/state2'
85
85
86 @staticmethod
86 @staticmethod
87 def clean(repo, node=None, other=None, labels=None):
87 def clean(repo, node=None, other=None, labels=None):
88 """Initialize a brand new merge state, removing any existing state on
88 """Initialize a brand new merge state, removing any existing state on
89 disk."""
89 disk."""
90 ms = mergestate(repo)
90 ms = mergestate(repo)
91 ms.reset(node, other, labels)
91 ms.reset(node, other, labels)
92 return ms
92 return ms
93
93
94 @staticmethod
94 @staticmethod
95 def read(repo):
95 def read(repo):
96 """Initialize the merge state, reading it from disk."""
96 """Initialize the merge state, reading it from disk."""
97 ms = mergestate(repo)
97 ms = mergestate(repo)
98 ms._read()
98 ms._read()
99 return ms
99 return ms
100
100
101 def __init__(self, repo):
101 def __init__(self, repo):
102 """Initialize the merge state.
102 """Initialize the merge state.
103
103
104 Do not use this directly! Instead call read() or clean()."""
104 Do not use this directly! Instead call read() or clean()."""
105 self._repo = repo
105 self._repo = repo
106 self._dirty = False
106 self._dirty = False
107 self._labels = None
107 self._labels = None
108
108
109 def reset(self, node=None, other=None, labels=None):
109 def reset(self, node=None, other=None, labels=None):
110 self._state = {}
110 self._state = {}
111 self._stateextras = {}
111 self._stateextras = {}
112 self._local = None
112 self._local = None
113 self._other = None
113 self._other = None
114 self._labels = labels
114 self._labels = labels
115 for var in ('localctx', 'otherctx'):
115 for var in ('localctx', 'otherctx'):
116 if var in vars(self):
116 if var in vars(self):
117 delattr(self, var)
117 delattr(self, var)
118 if node:
118 if node:
119 self._local = node
119 self._local = node
120 self._other = other
120 self._other = other
121 self._readmergedriver = None
121 self._readmergedriver = None
122 if self.mergedriver:
122 if self.mergedriver:
123 self._mdstate = 's'
123 self._mdstate = 's'
124 else:
124 else:
125 self._mdstate = 'u'
125 self._mdstate = 'u'
126 shutil.rmtree(self._repo.join('merge'), True)
126 shutil.rmtree(self._repo.join('merge'), True)
127 self._results = {}
127 self._results = {}
128 self._dirty = False
128 self._dirty = False
129
129
130 def _read(self):
130 def _read(self):
131 """Analyse each record content to restore a serialized state from disk
131 """Analyse each record content to restore a serialized state from disk
132
132
133 This function process "record" entry produced by the de-serialization
133 This function process "record" entry produced by the de-serialization
134 of on disk file.
134 of on disk file.
135 """
135 """
136 self._state = {}
136 self._state = {}
137 self._stateextras = {}
137 self._stateextras = {}
138 self._local = None
138 self._local = None
139 self._other = None
139 self._other = None
140 for var in ('localctx', 'otherctx'):
140 for var in ('localctx', 'otherctx'):
141 if var in vars(self):
141 if var in vars(self):
142 delattr(self, var)
142 delattr(self, var)
143 self._readmergedriver = None
143 self._readmergedriver = None
144 self._mdstate = 's'
144 self._mdstate = 's'
145 unsupported = set()
145 unsupported = set()
146 records = self._readrecords()
146 records = self._readrecords()
147 for rtype, record in records:
147 for rtype, record in records:
148 if rtype == 'L':
148 if rtype == 'L':
149 self._local = bin(record)
149 self._local = bin(record)
150 elif rtype == 'O':
150 elif rtype == 'O':
151 self._other = bin(record)
151 self._other = bin(record)
152 elif rtype == 'm':
152 elif rtype == 'm':
153 bits = record.split('\0', 1)
153 bits = record.split('\0', 1)
154 mdstate = bits[1]
154 mdstate = bits[1]
155 if len(mdstate) != 1 or mdstate not in 'ums':
155 if len(mdstate) != 1 or mdstate not in 'ums':
156 # the merge driver should be idempotent, so just rerun it
156 # the merge driver should be idempotent, so just rerun it
157 mdstate = 'u'
157 mdstate = 'u'
158
158
159 self._readmergedriver = bits[0]
159 self._readmergedriver = bits[0]
160 self._mdstate = mdstate
160 self._mdstate = mdstate
161 elif rtype in 'FDC':
161 elif rtype in 'FDC':
162 bits = record.split('\0')
162 bits = record.split('\0')
163 self._state[bits[0]] = bits[1:]
163 self._state[bits[0]] = bits[1:]
164 elif rtype == 'f':
164 elif rtype == 'f':
165 filename, rawextras = record.split('\0', 1)
165 filename, rawextras = record.split('\0', 1)
166 extraparts = rawextras.split('\0')
166 extraparts = rawextras.split('\0')
167 extras = {}
167 extras = {}
168 i = 0
168 i = 0
169 while i < len(extraparts):
169 while i < len(extraparts):
170 extras[extraparts[i]] = extraparts[i + 1]
170 extras[extraparts[i]] = extraparts[i + 1]
171 i += 2
171 i += 2
172
172
173 self._stateextras[filename] = extras
173 self._stateextras[filename] = extras
174 elif rtype == 'l':
174 elif rtype == 'l':
175 labels = record.split('\0', 2)
175 labels = record.split('\0', 2)
176 self._labels = [l for l in labels if len(l) > 0]
176 self._labels = [l for l in labels if len(l) > 0]
177 elif not rtype.islower():
177 elif not rtype.islower():
178 unsupported.add(rtype)
178 unsupported.add(rtype)
179 self._results = {}
179 self._results = {}
180 self._dirty = False
180 self._dirty = False
181
181
182 if unsupported:
182 if unsupported:
183 raise error.UnsupportedMergeRecords(unsupported)
183 raise error.UnsupportedMergeRecords(unsupported)
184
184
185 def _readrecords(self):
185 def _readrecords(self):
186 """Read merge state from disk and return a list of record (TYPE, data)
186 """Read merge state from disk and return a list of record (TYPE, data)
187
187
188 We read data from both v1 and v2 files and decide which one to use.
188 We read data from both v1 and v2 files and decide which one to use.
189
189
190 V1 has been used by version prior to 2.9.1 and contains less data than
190 V1 has been used by version prior to 2.9.1 and contains less data than
191 v2. We read both versions and check if no data in v2 contradicts
191 v2. We read both versions and check if no data in v2 contradicts
192 v1. If there is not contradiction we can safely assume that both v1
192 v1. If there is not contradiction we can safely assume that both v1
193 and v2 were written at the same time and use the extract data in v2. If
193 and v2 were written at the same time and use the extract data in v2. If
194 there is contradiction we ignore v2 content as we assume an old version
194 there is contradiction we ignore v2 content as we assume an old version
195 of Mercurial has overwritten the mergestate file and left an old v2
195 of Mercurial has overwritten the mergestate file and left an old v2
196 file around.
196 file around.
197
197
198 returns list of record [(TYPE, data), ...]"""
198 returns list of record [(TYPE, data), ...]"""
199 v1records = self._readrecordsv1()
199 v1records = self._readrecordsv1()
200 v2records = self._readrecordsv2()
200 v2records = self._readrecordsv2()
201 if self._v1v2match(v1records, v2records):
201 if self._v1v2match(v1records, v2records):
202 return v2records
202 return v2records
203 else:
203 else:
204 # v1 file is newer than v2 file, use it
204 # v1 file is newer than v2 file, use it
205 # we have to infer the "other" changeset of the merge
205 # we have to infer the "other" changeset of the merge
206 # we cannot do better than that with v1 of the format
206 # we cannot do better than that with v1 of the format
207 mctx = self._repo[None].parents()[-1]
207 mctx = self._repo[None].parents()[-1]
208 v1records.append(('O', mctx.hex()))
208 v1records.append(('O', mctx.hex()))
209 # add place holder "other" file node information
209 # add place holder "other" file node information
210 # nobody is using it yet so we do no need to fetch the data
210 # nobody is using it yet so we do no need to fetch the data
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
212 for idx, r in enumerate(v1records):
212 for idx, r in enumerate(v1records):
213 if r[0] == 'F':
213 if r[0] == 'F':
214 bits = r[1].split('\0')
214 bits = r[1].split('\0')
215 bits.insert(-2, '')
215 bits.insert(-2, '')
216 v1records[idx] = (r[0], '\0'.join(bits))
216 v1records[idx] = (r[0], '\0'.join(bits))
217 return v1records
217 return v1records
218
218
219 def _v1v2match(self, v1records, v2records):
219 def _v1v2match(self, v1records, v2records):
220 oldv2 = set() # old format version of v2 record
220 oldv2 = set() # old format version of v2 record
221 for rec in v2records:
221 for rec in v2records:
222 if rec[0] == 'L':
222 if rec[0] == 'L':
223 oldv2.add(rec)
223 oldv2.add(rec)
224 elif rec[0] == 'F':
224 elif rec[0] == 'F':
225 # drop the onode data (not contained in v1)
225 # drop the onode data (not contained in v1)
226 oldv2.add(('F', _droponode(rec[1])))
226 oldv2.add(('F', _droponode(rec[1])))
227 for rec in v1records:
227 for rec in v1records:
228 if rec not in oldv2:
228 if rec not in oldv2:
229 return False
229 return False
230 else:
230 else:
231 return True
231 return True
232
232
233 def _readrecordsv1(self):
233 def _readrecordsv1(self):
234 """read on disk merge state for version 1 file
234 """read on disk merge state for version 1 file
235
235
236 returns list of record [(TYPE, data), ...]
236 returns list of record [(TYPE, data), ...]
237
237
238 Note: the "F" data from this file are one entry short
238 Note: the "F" data from this file are one entry short
239 (no "other file node" entry)
239 (no "other file node" entry)
240 """
240 """
241 records = []
241 records = []
242 try:
242 try:
243 f = self._repo.vfs(self.statepathv1)
243 f = self._repo.vfs(self.statepathv1)
244 for i, l in enumerate(f):
244 for i, l in enumerate(f):
245 if i == 0:
245 if i == 0:
246 records.append(('L', l[:-1]))
246 records.append(('L', l[:-1]))
247 else:
247 else:
248 records.append(('F', l[:-1]))
248 records.append(('F', l[:-1]))
249 f.close()
249 f.close()
250 except IOError as err:
250 except IOError as err:
251 if err.errno != errno.ENOENT:
251 if err.errno != errno.ENOENT:
252 raise
252 raise
253 return records
253 return records
254
254
255 def _readrecordsv2(self):
255 def _readrecordsv2(self):
256 """read on disk merge state for version 2 file
256 """read on disk merge state for version 2 file
257
257
258 This format is a list of arbitrary records of the form:
258 This format is a list of arbitrary records of the form:
259
259
260 [type][length][content]
260 [type][length][content]
261
261
262 `type` is a single character, `length` is a 4 byte integer, and
262 `type` is a single character, `length` is a 4 byte integer, and
263 `content` is an arbitrary byte sequence of length `length`.
263 `content` is an arbitrary byte sequence of length `length`.
264
264
265 Mercurial versions prior to 3.7 have a bug where if there are
265 Mercurial versions prior to 3.7 have a bug where if there are
266 unsupported mandatory merge records, attempting to clear out the merge
266 unsupported mandatory merge records, attempting to clear out the merge
267 state with hg update --clean or similar aborts. The 't' record type
267 state with hg update --clean or similar aborts. The 't' record type
268 works around that by writing out what those versions treat as an
268 works around that by writing out what those versions treat as an
269 advisory record, but later versions interpret as special: the first
269 advisory record, but later versions interpret as special: the first
270 character is the 'real' record type and everything onwards is the data.
270 character is the 'real' record type and everything onwards is the data.
271
271
272 Returns list of records [(TYPE, data), ...]."""
272 Returns list of records [(TYPE, data), ...]."""
273 records = []
273 records = []
274 try:
274 try:
275 f = self._repo.vfs(self.statepathv2)
275 f = self._repo.vfs(self.statepathv2)
276 data = f.read()
276 data = f.read()
277 off = 0
277 off = 0
278 end = len(data)
278 end = len(data)
279 while off < end:
279 while off < end:
280 rtype = data[off]
280 rtype = data[off]
281 off += 1
281 off += 1
282 length = _unpack('>I', data[off:(off + 4)])[0]
282 length = _unpack('>I', data[off:(off + 4)])[0]
283 off += 4
283 off += 4
284 record = data[off:(off + length)]
284 record = data[off:(off + length)]
285 off += length
285 off += length
286 if rtype == 't':
286 if rtype == 't':
287 rtype, record = record[0], record[1:]
287 rtype, record = record[0], record[1:]
288 records.append((rtype, record))
288 records.append((rtype, record))
289 f.close()
289 f.close()
290 except IOError as err:
290 except IOError as err:
291 if err.errno != errno.ENOENT:
291 if err.errno != errno.ENOENT:
292 raise
292 raise
293 return records
293 return records
294
294
295 @util.propertycache
295 @util.propertycache
296 def mergedriver(self):
296 def mergedriver(self):
297 # protect against the following:
297 # protect against the following:
298 # - A configures a malicious merge driver in their hgrc, then
298 # - A configures a malicious merge driver in their hgrc, then
299 # pauses the merge
299 # pauses the merge
300 # - A edits their hgrc to remove references to the merge driver
300 # - A edits their hgrc to remove references to the merge driver
301 # - A gives a copy of their entire repo, including .hg, to B
301 # - A gives a copy of their entire repo, including .hg, to B
302 # - B inspects .hgrc and finds it to be clean
302 # - B inspects .hgrc and finds it to be clean
303 # - B then continues the merge and the malicious merge driver
303 # - B then continues the merge and the malicious merge driver
304 # gets invoked
304 # gets invoked
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
306 if (self._readmergedriver is not None
306 if (self._readmergedriver is not None
307 and self._readmergedriver != configmergedriver):
307 and self._readmergedriver != configmergedriver):
308 raise error.ConfigError(
308 raise error.ConfigError(
309 _("merge driver changed since merge started"),
309 _("merge driver changed since merge started"),
310 hint=_("revert merge driver change or abort merge"))
310 hint=_("revert merge driver change or abort merge"))
311
311
312 return configmergedriver
312 return configmergedriver
313
313
314 @util.propertycache
314 @util.propertycache
315 def localctx(self):
315 def localctx(self):
316 if self._local is None:
316 if self._local is None:
317 raise RuntimeError("localctx accessed but self._local isn't set")
317 raise RuntimeError("localctx accessed but self._local isn't set")
318 return self._repo[self._local]
318 return self._repo[self._local]
319
319
320 @util.propertycache
320 @util.propertycache
321 def otherctx(self):
321 def otherctx(self):
322 if self._other is None:
322 if self._other is None:
323 raise RuntimeError("otherctx accessed but self._other isn't set")
323 raise RuntimeError("otherctx accessed but self._other isn't set")
324 return self._repo[self._other]
324 return self._repo[self._other]
325
325
326 def active(self):
326 def active(self):
327 """Whether mergestate is active.
327 """Whether mergestate is active.
328
328
329 Returns True if there appears to be mergestate. This is a rough proxy
329 Returns True if there appears to be mergestate. This is a rough proxy
330 for "is a merge in progress."
330 for "is a merge in progress."
331 """
331 """
332 # Check local variables before looking at filesystem for performance
332 # Check local variables before looking at filesystem for performance
333 # reasons.
333 # reasons.
334 return bool(self._local) or bool(self._state) or \
334 return bool(self._local) or bool(self._state) or \
335 self._repo.vfs.exists(self.statepathv1) or \
335 self._repo.vfs.exists(self.statepathv1) or \
336 self._repo.vfs.exists(self.statepathv2)
336 self._repo.vfs.exists(self.statepathv2)
337
337
338 def commit(self):
338 def commit(self):
339 """Write current state on disk (if necessary)"""
339 """Write current state on disk (if necessary)"""
340 if self._dirty:
340 if self._dirty:
341 records = self._makerecords()
341 records = self._makerecords()
342 self._writerecords(records)
342 self._writerecords(records)
343 self._dirty = False
343 self._dirty = False
344
344
345 def _makerecords(self):
345 def _makerecords(self):
346 records = []
346 records = []
347 records.append(('L', hex(self._local)))
347 records.append(('L', hex(self._local)))
348 records.append(('O', hex(self._other)))
348 records.append(('O', hex(self._other)))
349 if self.mergedriver:
349 if self.mergedriver:
350 records.append(('m', '\0'.join([
350 records.append(('m', '\0'.join([
351 self.mergedriver, self._mdstate])))
351 self.mergedriver, self._mdstate])))
352 for d, v in self._state.iteritems():
352 for d, v in self._state.iteritems():
353 if v[0] == 'd':
353 if v[0] == 'd':
354 records.append(('D', '\0'.join([d] + v)))
354 records.append(('D', '\0'.join([d] + v)))
355 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
355 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
356 # older versions of Mercurial
356 # older versions of Mercurial
357 elif v[1] == nullhex or v[6] == nullhex:
357 elif v[1] == nullhex or v[6] == nullhex:
358 records.append(('C', '\0'.join([d] + v)))
358 records.append(('C', '\0'.join([d] + v)))
359 else:
359 else:
360 records.append(('F', '\0'.join([d] + v)))
360 records.append(('F', '\0'.join([d] + v)))
361 for filename, extras in sorted(self._stateextras.iteritems()):
361 for filename, extras in sorted(self._stateextras.iteritems()):
362 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
362 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
363 extras.iteritems())
363 extras.iteritems())
364 records.append(('f', '%s\0%s' % (filename, rawextras)))
364 records.append(('f', '%s\0%s' % (filename, rawextras)))
365 if self._labels is not None:
365 if self._labels is not None:
366 labels = '\0'.join(self._labels)
366 labels = '\0'.join(self._labels)
367 records.append(('l', labels))
367 records.append(('l', labels))
368 return records
368 return records
369
369
370 def _writerecords(self, records):
370 def _writerecords(self, records):
371 """Write current state on disk (both v1 and v2)"""
371 """Write current state on disk (both v1 and v2)"""
372 self._writerecordsv1(records)
372 self._writerecordsv1(records)
373 self._writerecordsv2(records)
373 self._writerecordsv2(records)
374
374
375 def _writerecordsv1(self, records):
375 def _writerecordsv1(self, records):
376 """Write current state on disk in a version 1 file"""
376 """Write current state on disk in a version 1 file"""
377 f = self._repo.vfs(self.statepathv1, 'w')
377 f = self._repo.vfs(self.statepathv1, 'w')
378 irecords = iter(records)
378 irecords = iter(records)
379 lrecords = next(irecords)
379 lrecords = next(irecords)
380 assert lrecords[0] == 'L'
380 assert lrecords[0] == 'L'
381 f.write(hex(self._local) + '\n')
381 f.write(hex(self._local) + '\n')
382 for rtype, data in irecords:
382 for rtype, data in irecords:
383 if rtype == 'F':
383 if rtype == 'F':
384 f.write('%s\n' % _droponode(data))
384 f.write('%s\n' % _droponode(data))
385 f.close()
385 f.close()
386
386
387 def _writerecordsv2(self, records):
387 def _writerecordsv2(self, records):
388 """Write current state on disk in a version 2 file
388 """Write current state on disk in a version 2 file
389
389
390 See the docstring for _readrecordsv2 for why we use 't'."""
390 See the docstring for _readrecordsv2 for why we use 't'."""
391 # these are the records that all version 2 clients can read
391 # these are the records that all version 2 clients can read
392 whitelist = 'LOF'
392 whitelist = 'LOF'
393 f = self._repo.vfs(self.statepathv2, 'w')
393 f = self._repo.vfs(self.statepathv2, 'w')
394 for key, data in records:
394 for key, data in records:
395 assert len(key) == 1
395 assert len(key) == 1
396 if key not in whitelist:
396 if key not in whitelist:
397 key, data = 't', '%s%s' % (key, data)
397 key, data = 't', '%s%s' % (key, data)
398 format = '>sI%is' % len(data)
398 format = '>sI%is' % len(data)
399 f.write(_pack(format, key, len(data), data))
399 f.write(_pack(format, key, len(data), data))
400 f.close()
400 f.close()
401
401
402 def add(self, fcl, fco, fca, fd):
402 def add(self, fcl, fco, fca, fd):
403 """add a new (potentially?) conflicting file the merge state
403 """add a new (potentially?) conflicting file the merge state
404 fcl: file context for local,
404 fcl: file context for local,
405 fco: file context for remote,
405 fco: file context for remote,
406 fca: file context for ancestors,
406 fca: file context for ancestors,
407 fd: file path of the resulting merge.
407 fd: file path of the resulting merge.
408
408
409 note: also write the local version to the `.hg/merge` directory.
409 note: also write the local version to the `.hg/merge` directory.
410 """
410 """
411 if fcl.isabsent():
411 if fcl.isabsent():
412 hash = nullhex
412 hash = nullhex
413 else:
413 else:
414 hash = hashlib.sha1(fcl.path()).hexdigest()
414 hash = hashlib.sha1(fcl.path()).hexdigest()
415 self._repo.vfs.write('merge/' + hash, fcl.data())
415 self._repo.vfs.write('merge/' + hash, fcl.data())
416 self._state[fd] = ['u', hash, fcl.path(),
416 self._state[fd] = ['u', hash, fcl.path(),
417 fca.path(), hex(fca.filenode()),
417 fca.path(), hex(fca.filenode()),
418 fco.path(), hex(fco.filenode()),
418 fco.path(), hex(fco.filenode()),
419 fcl.flags()]
419 fcl.flags()]
420 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
420 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
421 self._dirty = True
421 self._dirty = True
422
422
423 def __contains__(self, dfile):
423 def __contains__(self, dfile):
424 return dfile in self._state
424 return dfile in self._state
425
425
426 def __getitem__(self, dfile):
426 def __getitem__(self, dfile):
427 return self._state[dfile][0]
427 return self._state[dfile][0]
428
428
429 def __iter__(self):
429 def __iter__(self):
430 return iter(sorted(self._state))
430 return iter(sorted(self._state))
431
431
432 def files(self):
432 def files(self):
433 return self._state.keys()
433 return self._state.keys()
434
434
435 def mark(self, dfile, state):
435 def mark(self, dfile, state):
436 self._state[dfile][0] = state
436 self._state[dfile][0] = state
437 self._dirty = True
437 self._dirty = True
438
438
439 def mdstate(self):
439 def mdstate(self):
440 return self._mdstate
440 return self._mdstate
441
441
442 def unresolved(self):
442 def unresolved(self):
443 """Obtain the paths of unresolved files."""
443 """Obtain the paths of unresolved files."""
444
444
445 for f, entry in self._state.items():
445 for f, entry in self._state.items():
446 if entry[0] == 'u':
446 if entry[0] == 'u':
447 yield f
447 yield f
448
448
449 def driverresolved(self):
449 def driverresolved(self):
450 """Obtain the paths of driver-resolved files."""
450 """Obtain the paths of driver-resolved files."""
451
451
452 for f, entry in self._state.items():
452 for f, entry in self._state.items():
453 if entry[0] == 'd':
453 if entry[0] == 'd':
454 yield f
454 yield f
455
455
456 def extras(self, filename):
456 def extras(self, filename):
457 return self._stateextras.setdefault(filename, {})
457 return self._stateextras.setdefault(filename, {})
458
458
459 def _resolve(self, preresolve, dfile, wctx):
459 def _resolve(self, preresolve, dfile, wctx):
460 """rerun merge process for file path `dfile`"""
460 """rerun merge process for file path `dfile`"""
461 if self[dfile] in 'rd':
461 if self[dfile] in 'rd':
462 return True, 0
462 return True, 0
463 stateentry = self._state[dfile]
463 stateentry = self._state[dfile]
464 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
464 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
465 octx = self._repo[self._other]
465 octx = self._repo[self._other]
466 extras = self.extras(dfile)
466 extras = self.extras(dfile)
467 anccommitnode = extras.get('ancestorlinknode')
467 anccommitnode = extras.get('ancestorlinknode')
468 if anccommitnode:
468 if anccommitnode:
469 actx = self._repo[anccommitnode]
469 actx = self._repo[anccommitnode]
470 else:
470 else:
471 actx = None
471 actx = None
472 fcd = self._filectxorabsent(hash, wctx, dfile)
472 fcd = self._filectxorabsent(hash, wctx, dfile)
473 fco = self._filectxorabsent(onode, octx, ofile)
473 fco = self._filectxorabsent(onode, octx, ofile)
474 # TODO: move this to filectxorabsent
474 # TODO: move this to filectxorabsent
475 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
475 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
476 # "premerge" x flags
476 # "premerge" x flags
477 flo = fco.flags()
477 flo = fco.flags()
478 fla = fca.flags()
478 fla = fca.flags()
479 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
479 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
480 if fca.node() == nullid and flags != flo:
480 if fca.node() == nullid and flags != flo:
481 if preresolve:
481 if preresolve:
482 self._repo.ui.warn(
482 self._repo.ui.warn(
483 _('warning: cannot merge flags for %s '
483 _('warning: cannot merge flags for %s '
484 'without common ancestor - keeping local flags\n')
484 'without common ancestor - keeping local flags\n')
485 % afile)
485 % afile)
486 elif flags == fla:
486 elif flags == fla:
487 flags = flo
487 flags = flo
488 if preresolve:
488 if preresolve:
489 # restore local
489 # restore local
490 if hash != nullhex:
490 if hash != nullhex:
491 f = self._repo.vfs('merge/' + hash)
491 f = self._repo.vfs('merge/' + hash)
492 self._repo.wwrite(dfile, f.read(), flags)
492 self._repo.wwrite(dfile, f.read(), flags)
493 f.close()
493 f.close()
494 else:
494 else:
495 self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
495 self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
496 complete, r, deleted = filemerge.premerge(self._repo, self._local,
496 complete, r, deleted = filemerge.premerge(self._repo, self._local,
497 lfile, fcd, fco, fca,
497 lfile, fcd, fco, fca,
498 labels=self._labels)
498 labels=self._labels)
499 else:
499 else:
500 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
500 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
501 lfile, fcd, fco, fca,
501 lfile, fcd, fco, fca,
502 labels=self._labels)
502 labels=self._labels)
503 if r is None:
503 if r is None:
504 # no real conflict
504 # no real conflict
505 del self._state[dfile]
505 del self._state[dfile]
506 self._stateextras.pop(dfile, None)
506 self._stateextras.pop(dfile, None)
507 self._dirty = True
507 self._dirty = True
508 elif not r:
508 elif not r:
509 self.mark(dfile, 'r')
509 self.mark(dfile, 'r')
510
510
511 if complete:
511 if complete:
512 action = None
512 action = None
513 if deleted:
513 if deleted:
514 if fcd.isabsent():
514 if fcd.isabsent():
515 # dc: local picked. Need to drop if present, which may
515 # dc: local picked. Need to drop if present, which may
516 # happen on re-resolves.
516 # happen on re-resolves.
517 action = 'f'
517 action = 'f'
518 else:
518 else:
519 # cd: remote picked (or otherwise deleted)
519 # cd: remote picked (or otherwise deleted)
520 action = 'r'
520 action = 'r'
521 else:
521 else:
522 if fcd.isabsent(): # dc: remote picked
522 if fcd.isabsent(): # dc: remote picked
523 action = 'g'
523 action = 'g'
524 elif fco.isabsent(): # cd: local picked
524 elif fco.isabsent(): # cd: local picked
525 if dfile in self.localctx:
525 if dfile in self.localctx:
526 action = 'am'
526 action = 'am'
527 else:
527 else:
528 action = 'a'
528 action = 'a'
529 # else: regular merges (no action necessary)
529 # else: regular merges (no action necessary)
530 self._results[dfile] = r, action
530 self._results[dfile] = r, action
531
531
532 return complete, r
532 return complete, r
533
533
534 def _filectxorabsent(self, hexnode, ctx, f):
534 def _filectxorabsent(self, hexnode, ctx, f):
535 if hexnode == nullhex:
535 if hexnode == nullhex:
536 return filemerge.absentfilectx(ctx, f)
536 return filemerge.absentfilectx(ctx, f)
537 else:
537 else:
538 return ctx[f]
538 return ctx[f]
539
539
540 def preresolve(self, dfile, wctx):
540 def preresolve(self, dfile, wctx):
541 """run premerge process for dfile
541 """run premerge process for dfile
542
542
543 Returns whether the merge is complete, and the exit code."""
543 Returns whether the merge is complete, and the exit code."""
544 return self._resolve(True, dfile, wctx)
544 return self._resolve(True, dfile, wctx)
545
545
546 def resolve(self, dfile, wctx):
546 def resolve(self, dfile, wctx):
547 """run merge process (assuming premerge was run) for dfile
547 """run merge process (assuming premerge was run) for dfile
548
548
549 Returns the exit code of the merge."""
549 Returns the exit code of the merge."""
550 return self._resolve(False, dfile, wctx)[1]
550 return self._resolve(False, dfile, wctx)[1]
551
551
552 def counts(self):
552 def counts(self):
553 """return counts for updated, merged and removed files in this
553 """return counts for updated, merged and removed files in this
554 session"""
554 session"""
555 updated, merged, removed = 0, 0, 0
555 updated, merged, removed = 0, 0, 0
556 for r, action in self._results.itervalues():
556 for r, action in self._results.itervalues():
557 if r is None:
557 if r is None:
558 updated += 1
558 updated += 1
559 elif r == 0:
559 elif r == 0:
560 if action == 'r':
560 if action == 'r':
561 removed += 1
561 removed += 1
562 else:
562 else:
563 merged += 1
563 merged += 1
564 return updated, merged, removed
564 return updated, merged, removed
565
565
566 def unresolvedcount(self):
566 def unresolvedcount(self):
567 """get unresolved count for this merge (persistent)"""
567 """get unresolved count for this merge (persistent)"""
568 return len([True for f, entry in self._state.iteritems()
568 return len([True for f, entry in self._state.iteritems()
569 if entry[0] == 'u'])
569 if entry[0] == 'u'])
570
570
571 def actions(self):
571 def actions(self):
572 """return lists of actions to perform on the dirstate"""
572 """return lists of actions to perform on the dirstate"""
573 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
573 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
574 for f, (r, action) in self._results.iteritems():
574 for f, (r, action) in self._results.iteritems():
575 if action is not None:
575 if action is not None:
576 actions[action].append((f, None, "merge result"))
576 actions[action].append((f, None, "merge result"))
577 return actions
577 return actions
578
578
579 def recordactions(self):
579 def recordactions(self):
580 """record remove/add/get actions in the dirstate"""
580 """record remove/add/get actions in the dirstate"""
581 branchmerge = self._repo.dirstate.p2() != nullid
581 branchmerge = self._repo.dirstate.p2() != nullid
582 recordupdates(self._repo, self.actions(), branchmerge)
582 recordupdates(self._repo, self.actions(), branchmerge)
583
583
584 def queueremove(self, f):
584 def queueremove(self, f):
585 """queues a file to be removed from the dirstate
585 """queues a file to be removed from the dirstate
586
586
587 Meant for use by custom merge drivers."""
587 Meant for use by custom merge drivers."""
588 self._results[f] = 0, 'r'
588 self._results[f] = 0, 'r'
589
589
590 def queueadd(self, f):
590 def queueadd(self, f):
591 """queues a file to be added to the dirstate
591 """queues a file to be added to the dirstate
592
592
593 Meant for use by custom merge drivers."""
593 Meant for use by custom merge drivers."""
594 self._results[f] = 0, 'a'
594 self._results[f] = 0, 'a'
595
595
596 def queueget(self, f):
596 def queueget(self, f):
597 """queues a file to be marked modified in the dirstate
597 """queues a file to be marked modified in the dirstate
598
598
599 Meant for use by custom merge drivers."""
599 Meant for use by custom merge drivers."""
600 self._results[f] = 0, 'g'
600 self._results[f] = 0, 'g'
601
601
602 def _getcheckunknownconfig(repo, section, name):
602 def _getcheckunknownconfig(repo, section, name):
603 config = repo.ui.config(section, name, default='abort')
603 config = repo.ui.config(section, name, default='abort')
604 valid = ['abort', 'ignore', 'warn']
604 valid = ['abort', 'ignore', 'warn']
605 if config not in valid:
605 if config not in valid:
606 validstr = ', '.join(["'" + v + "'" for v in valid])
606 validstr = ', '.join(["'" + v + "'" for v in valid])
607 raise error.ConfigError(_("%s.%s not valid "
607 raise error.ConfigError(_("%s.%s not valid "
608 "('%s' is none of %s)")
608 "('%s' is none of %s)")
609 % (section, name, config, validstr))
609 % (section, name, config, validstr))
610 return config
610 return config
611
611
612 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
612 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
613 if f2 is None:
613 if f2 is None:
614 f2 = f
614 f2 = f
615 return (repo.wvfs.audit.check(f)
615 return (repo.wvfs.audit.check(f)
616 and repo.wvfs.isfileorlink(f)
616 and repo.wvfs.isfileorlink(f)
617 and repo.dirstate.normalize(f) not in repo.dirstate
617 and repo.dirstate.normalize(f) not in repo.dirstate
618 and mctx[f2].cmp(wctx[f]))
618 and mctx[f2].cmp(wctx[f]))
619
619
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates ``actions`` in place and may raise error.Abort when the
    configured policy is 'abort' and conflicting untracked files exist.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    # policy ('abort'/'ignore'/'warn') for unknown vs. ignored files
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)
            # 'ignore': conflicts stay only in the shared `conflicts` set,
            # which later forces a backup on 'c' actions

        # gather files whose create/get actions would clobber a differing
        # untracked file in the working directory
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                # NOTE(review): the next branch is unreachable -- the branch
                # above already matches config == 'abort'.  It documents the
                # intended abort behavior described as (1) in the table; do
                # not remove without deciding the (1) question.
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # rewrite remaining 'c' (create) actions as 'g' (get), backing up the
    # working-directory file when it was flagged as conflicting above.
    # Only values of existing keys are replaced, which is safe while
    # iterating the dict in Python 2.
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
699
699
700 def _forgetremoved(wctx, mctx, branchmerge):
700 def _forgetremoved(wctx, mctx, branchmerge):
701 """
701 """
702 Forget removed files
702 Forget removed files
703
703
704 If we're jumping between revisions (as opposed to merging), and if
704 If we're jumping between revisions (as opposed to merging), and if
705 neither the working directory nor the target rev has the file,
705 neither the working directory nor the target rev has the file,
706 then we need to remove it from the dirstate, to prevent the
706 then we need to remove it from the dirstate, to prevent the
707 dirstate from listing the file when it is no longer in the
707 dirstate from listing the file when it is no longer in the
708 manifest.
708 manifest.
709
709
710 If we're merging, and the other revision has removed a file
710 If we're merging, and the other revision has removed a file
711 that is not present in the working directory, we need to mark it
711 that is not present in the working directory, we need to mark it
712 as removed.
712 as removed.
713 """
713 """
714
714
715 actions = {}
715 actions = {}
716 m = 'f'
716 m = 'f'
717 if branchmerge:
717 if branchmerge:
718 m = 'r'
718 m = 'r'
719 for f in wctx.deleted():
719 for f in wctx.deleted():
720 if f not in mctx:
720 if f not in mctx:
721 actions[f] = m, None, "forget deleted"
721 actions[f] = m, None, "forget deleted"
722
722
723 if not branchmerge:
723 if not branchmerge:
724 for f in wctx.removed():
724 for f in wctx.removed():
725 if f not in mctx:
725 if f not in mctx:
726 actions[f] = 'f', None, "forget removed"
726 actions[f] = 'f', None, "forget removed"
727
727
728 return actions
728 return actions
729
729
def _checkcollision(repo, wmf, actions):
    """Abort if the merge result would contain case-folding collisions.

    wmf is the working manifest; actions is the per-kind action dict.
    Raises error.Abort on a file/file or file/directory collision under
    case folding; returns None otherwise.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            # directory-rename move: f2 disappears, f appears
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    # foldmap is scanned in sorted (folded) order, so any entry whose folded
    # name extends the previous entry's folded prefix but whose real name
    # does not extend the real prefix is a file-vs-directory casing clash.
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
772
772
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the preprocess step of the merge driver, if any.

    Currently a no-op extension point; always reports success (True).
    """
    return True
778
778
def driverconclude(repo, ms, wctx, labels=None):
    """Run the conclude step of the merge driver, if any.

    Currently a no-op extension point; always reports success (True).
    """
    return True
784
784
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    followcopies = whether to run copies.mergecopies for rename detection

    Returns (actions, diverge, renamedelete).  ``actions`` maps filenames
    to (action-code, args, message) tuples; ``diverge`` and
    ``renamedelete`` come from copies.mergecopies (empty dicts when
    followcopies is off).
    """
    if matcher is not None and matcher.always():
        # an always-matcher filters nothing; drop it to take fast paths
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(matcher)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    # files that are the *target* of a copy/move; the source side skips them
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Compare manifests
    if matcher is not None:
        m1 = m1.matches(matcher)
        m2 = m2.matches(matcher)
    diff = m1.diff(m2)

    # action codes emitted below: 'm' merge, 'g' get, 'k' keep, 'e' update
    # exec flags, 'r' remove, 'f' forget, 'c'/'cm' create (or merge),
    # 'cd'/'dc' change/delete prompts, 'dm'/'dg' directory-rename move/get
    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                # not in the ancestor: both sides added it independently,
                # possibly both renaming it from the same source
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink flag involved anywhere, so exec-bit-only
                # changes can be resolved without a content merge
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    # deferred: _checkunownfiles decides between get/merge
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
940
940
941 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
941 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
942 """Resolves false conflicts where the nodeid changed but the content
942 """Resolves false conflicts where the nodeid changed but the content
943 remained the same."""
943 remained the same."""
944
944
945 for f, (m, args, msg) in actions.items():
945 for f, (m, args, msg) in actions.items():
946 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
946 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
947 # local did change but ended up with same content
947 # local did change but ended up with same content
948 actions[f] = 'r', None, "prompt same"
948 actions[f] = 'r', None, "prompt same"
949 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
949 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
950 # remote did change but ended up with same content
950 # remote did change but ended up with same content
951 del actions[f] # don't get = keep local deleted
951 del actions[f] # don't get = keep local deleted
952
952
953 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
953 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
954 acceptremote, followcopies, matcher=None,
954 acceptremote, followcopies, matcher=None,
955 mergeforce=False):
955 mergeforce=False):
956 "Calculate the actions needed to merge mctx into wctx using ancestors"
956 "Calculate the actions needed to merge mctx into wctx using ancestors"
957 if len(ancestors) == 1: # default
957 if len(ancestors) == 1: # default
958 actions, diverge, renamedelete = manifestmerge(
958 actions, diverge, renamedelete = manifestmerge(
959 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
959 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
960 acceptremote, followcopies)
960 acceptremote, followcopies)
961 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
961 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
962
962
963 else: # only when merge.preferancestor=* - the default
963 else: # only when merge.preferancestor=* - the default
964 repo.ui.note(
964 repo.ui.note(
965 _("note: merging %s and %s using bids from ancestors %s\n") %
965 _("note: merging %s and %s using bids from ancestors %s\n") %
966 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
966 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
967
967
968 # Call for bids
968 # Call for bids
969 fbids = {} # mapping filename to bids (action method to list af actions)
969 fbids = {} # mapping filename to bids (action method to list af actions)
970 diverge, renamedelete = None, None
970 diverge, renamedelete = None, None
971 for ancestor in ancestors:
971 for ancestor in ancestors:
972 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
972 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
973 actions, diverge1, renamedelete1 = manifestmerge(
973 actions, diverge1, renamedelete1 = manifestmerge(
974 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
974 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
975 acceptremote, followcopies)
975 acceptremote, followcopies)
976 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
976 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
977
977
978 # Track the shortest set of warning on the theory that bid
978 # Track the shortest set of warning on the theory that bid
979 # merge will correctly incorporate more information
979 # merge will correctly incorporate more information
980 if diverge is None or len(diverge1) < len(diverge):
980 if diverge is None or len(diverge1) < len(diverge):
981 diverge = diverge1
981 diverge = diverge1
982 if renamedelete is None or len(renamedelete) < len(renamedelete1):
982 if renamedelete is None or len(renamedelete) < len(renamedelete1):
983 renamedelete = renamedelete1
983 renamedelete = renamedelete1
984
984
985 for f, a in sorted(actions.iteritems()):
985 for f, a in sorted(actions.iteritems()):
986 m, args, msg = a
986 m, args, msg = a
987 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
987 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
988 if f in fbids:
988 if f in fbids:
989 d = fbids[f]
989 d = fbids[f]
990 if m in d:
990 if m in d:
991 d[m].append(a)
991 d[m].append(a)
992 else:
992 else:
993 d[m] = [a]
993 d[m] = [a]
994 else:
994 else:
995 fbids[f] = {m: [a]}
995 fbids[f] = {m: [a]}
996
996
997 # Pick the best bid for each file
997 # Pick the best bid for each file
998 repo.ui.note(_('\nauction for merging merge bids\n'))
998 repo.ui.note(_('\nauction for merging merge bids\n'))
999 actions = {}
999 actions = {}
1000 dms = [] # filenames that have dm actions
1000 dms = [] # filenames that have dm actions
1001 for f, bids in sorted(fbids.items()):
1001 for f, bids in sorted(fbids.items()):
1002 # bids is a mapping from action method to list af actions
1002 # bids is a mapping from action method to list af actions
1003 # Consensus?
1003 # Consensus?
1004 if len(bids) == 1: # all bids are the same kind of method
1004 if len(bids) == 1: # all bids are the same kind of method
1005 m, l = bids.items()[0]
1005 m, l = bids.items()[0]
1006 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1006 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1007 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1007 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1008 actions[f] = l[0]
1008 actions[f] = l[0]
1009 if m == 'dm':
1009 if m == 'dm':
1010 dms.append(f)
1010 dms.append(f)
1011 continue
1011 continue
1012 # If keep is an option, just do it.
1012 # If keep is an option, just do it.
1013 if 'k' in bids:
1013 if 'k' in bids:
1014 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1014 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1015 actions[f] = bids['k'][0]
1015 actions[f] = bids['k'][0]
1016 continue
1016 continue
1017 # If there are gets and they all agree [how could they not?], do it.
1017 # If there are gets and they all agree [how could they not?], do it.
1018 if 'g' in bids:
1018 if 'g' in bids:
1019 ga0 = bids['g'][0]
1019 ga0 = bids['g'][0]
1020 if all(a == ga0 for a in bids['g'][1:]):
1020 if all(a == ga0 for a in bids['g'][1:]):
1021 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1021 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1022 actions[f] = ga0
1022 actions[f] = ga0
1023 continue
1023 continue
1024 # TODO: Consider other simple actions such as mode changes
1024 # TODO: Consider other simple actions such as mode changes
1025 # Handle inefficient democrazy.
1025 # Handle inefficient democrazy.
1026 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1026 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1027 for m, l in sorted(bids.items()):
1027 for m, l in sorted(bids.items()):
1028 for _f, args, msg in l:
1028 for _f, args, msg in l:
1029 repo.ui.note(' %s -> %s\n' % (msg, m))
1029 repo.ui.note(' %s -> %s\n' % (msg, m))
1030 # Pick random action. TODO: Instead, prompt user when resolving
1030 # Pick random action. TODO: Instead, prompt user when resolving
1031 m, l = bids.items()[0]
1031 m, l = bids.items()[0]
1032 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1032 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1033 (f, m))
1033 (f, m))
1034 actions[f] = l[0]
1034 actions[f] = l[0]
1035 if m == 'dm':
1035 if m == 'dm':
1036 dms.append(f)
1036 dms.append(f)
1037 continue
1037 continue
1038 # Work around 'dm' that can cause multiple actions for the same file
1038 # Work around 'dm' that can cause multiple actions for the same file
1039 for f in dms:
1039 for f in dms:
1040 dm, (f0, flags), msg = actions[f]
1040 dm, (f0, flags), msg = actions[f]
1041 assert dm == 'dm', dm
1041 assert dm == 'dm', dm
1042 if f0 in actions and actions[f0][0] == 'r':
1042 if f0 in actions and actions[f0][0] == 'r':
1043 # We have one bid for removing a file and another for moving it.
1043 # We have one bid for removing a file and another for moving it.
1044 # These two could be merged as first move and then delete ...
1044 # These two could be merged as first move and then delete ...
1045 # but instead drop moving and just delete.
1045 # but instead drop moving and just delete.
1046 del actions[f]
1046 del actions[f]
1047 repo.ui.note(_('end of auction\n\n'))
1047 repo.ui.note(_('end of auction\n\n'))
1048
1048
1049 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1049 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1050
1050
1051 if wctx.rev() is None:
1051 if wctx.rev() is None:
1052 fractions = _forgetremoved(wctx, mctx, branchmerge)
1052 fractions = _forgetremoved(wctx, mctx, branchmerge)
1053 actions.update(fractions)
1053 actions.update(fractions)
1054
1054
1055 return actions, diverge, renamedelete
1055 return actions, diverge, renamedelete
1056
1056
def batchremove(repo, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    # Remember whether the working directory existed before we start
    # deleting files, so we can warn if the removals took it away.
    try:
        startingcwd = pycompat.getcwd()
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        startingcwd = None
    pending = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        # batch progress reports: emit one update per ~100 files removed
        if pending == 100:
            yield pending, f
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, f
    if startingcwd:
        # cwd was present before we started to remove files
        # let's check if it is present after we removed them
        try:
            pycompat.getcwd()
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # Print a warning if cwd was deleted
            repo.ui.warn(_("current directory was removed\n"
                           "(consider changing to repo root: %s)\n") %
                         repo.root)
1101
1101
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Each action is a (filename, (flags, backup), msg) tuple; when backup
    is true the existing working copy file is preserved as an .orig file
    before being overwritten.

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    wwrite = repo.wwrite
    ui = repo.ui
    i = 0
    # backgroundclosing lets file closes happen on worker threads where
    # supported; leaving the context waits for them to finish
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # preserve the conflicting working copy file before
                # clobbering it with the incoming version
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    # ENOENT: the file vanished between the check and the
                    # rename; anything else is a real error
                    if e.errno != errno.ENOENT:
                        raise

            if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
                # a directory occupies the path of the incoming file;
                # drop the (empty) directory chain so the file can land
                repo.wvfs.removedirs(f)
            wwrite(f, fctx(f).data(), flags, backgroundclose=True)
            # emit a progress tuple roughly every 100 files
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f
1139
1139
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    actions maps an action type ('r', 'g', 'm', 'dm', ...) to a list of
    (file, args, msg) tuples; the lists are sorted in place and the 'm'
    list may be rewritten from merge-state feedback, so callers should
    not rely on actions being left untouched.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    # fresh merge state recording the two parents being merged
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    # sort every action list for a deterministic processing order
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    # register each merge in the merge state before touching the
    # working directory, so an interrupted merge can be resumed
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    # 'k' (keep) actions are no-ops and excluded from the progress total
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # only the files the driver left unresolved still need merging
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        audit(f)
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            # file needs a real (possibly interactive) merge pass below;
            # it counts as an extra unit of progress
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    # mdstate 's' means the driver already ran to completion —
    # NOTE(review): inferred from the skip condition here; confirm against
    # mergestate documentation
    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    # fold per-file counts recorded by the merge state into our totals
    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1361
1361
def recordupdates(repo, actions, branchmerge):
    """record merge actions to the dirstate"""
    ds = repo.dirstate

    # removals are recorded first: a rename below may re-add the path
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            ds.remove(f)
        else:
            ds.drop(f)

    # forget also comes first
    for f, args, msg in actions.get('f', []):
        ds.drop(f)

    # re-add
    for f, args, msg in actions.get('a', []):
        ds.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            ds.normallookup(f)
        else:
            ds.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        ds.normallookup(f)

    # keep: nothing to record
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            ds.otherparent(f)
        else:
            ds.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # after a branch merge, flag the file as merged so the
            # merger is recorded properly at commit time
            ds.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    ds.remove(f1)
                if f1 != f:
                    ds.copy(f1, f)
                else:
                    ds.copy(f2, f)
        else:
            # an update-merge of a locally modified file: make the
            # dirstate look like a plain checkout of that file at some
            # point in the past, so the merge appears as an ordinary
            # local modification
            if f2 == f: # file not locally copied/moved
                ds.normallookup(f)
            if move:
                ds.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            ds.add(f)
            ds.remove(f0)
            ds.copy(f0, f)
        else:
            ds.normal(f)
            ds.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            ds.add(f)
            ds.copy(f0, f)
        else:
            ds.normal(f)
1445
1445
1446 def update(repo, node, branchmerge, force, ancestor=None,
1446 def update(repo, node, branchmerge, force, ancestor=None,
1447 mergeancestor=False, labels=None, matcher=None, mergeforce=False):
1447 mergeancestor=False, labels=None, matcher=None, mergeforce=False):
1448 """
1448 """
1449 Perform a merge between the working directory and the given node
1449 Perform a merge between the working directory and the given node
1450
1450
1451 node = the node to update to
1451 node = the node to update to
1452 branchmerge = whether to merge between branches
1452 branchmerge = whether to merge between branches
1453 force = whether to force branch merging or file overwriting
1453 force = whether to force branch merging or file overwriting
1454 matcher = a matcher to filter file lists (dirstate not updated)
1454 matcher = a matcher to filter file lists (dirstate not updated)
1455 mergeancestor = whether it is merging with an ancestor. If true,
1455 mergeancestor = whether it is merging with an ancestor. If true,
1456 we should accept the incoming changes for any prompts that occur.
1456 we should accept the incoming changes for any prompts that occur.
1457 If false, merging with an ancestor (fast-forward) is only allowed
1457 If false, merging with an ancestor (fast-forward) is only allowed
1458 between different named branches. This flag is used by rebase extension
1458 between different named branches. This flag is used by rebase extension
1459 as a temporary fix and should be avoided in general.
1459 as a temporary fix and should be avoided in general.
1460 labels = labels to use for base, local and other
1460 labels = labels to use for base, local and other
1461 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1461 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1462 this is True, then 'force' should be True as well.
1462 this is True, then 'force' should be True as well.
1463
1463
1464 The table below shows all the behaviors of the update command
1464 The table below shows all the behaviors of the update command
1465 given the -c and -C or no options, whether the working directory
1465 given the -c and -C or no options, whether the working directory
1466 is dirty, whether a revision is specified, and the relationship of
1466 is dirty, whether a revision is specified, and the relationship of
1467 the parent rev to the target rev (linear, on the same named
1467 the parent rev to the target rev (linear, on the same named
1468 branch, or on another named branch).
1468 branch, or on another named branch).
1469
1469
1470 This logic is tested by test-update-branches.t.
1470 This logic is tested by test-update-branches.t.
1471
1471
1472 -c -C dirty rev | linear same cross
1472 -c -C dirty rev | linear same cross
1473 n n n n | ok (1) x
1473 n n n n | ok (1) x
1474 n n n y | ok ok ok
1474 n n n y | ok ok ok
1475 n n y n | merge (2) (2)
1475 n n y n | merge (2) (2)
1476 n n y y | merge (3) (3)
1476 n n y y | merge (3) (3)
1477 n y * * | discard discard discard
1477 n y * * | discard discard discard
1478 y n y * | (4) (4) (4)
1478 y n y * | (4) (4) (4)
1479 y n n * | ok ok ok
1479 y n n * | ok ok ok
1480 y y * * | (5) (5) (5)
1480 y y * * | (5) (5) (5)
1481
1481
1482 x = can't happen
1482 x = can't happen
1483 * = don't-care
1483 * = don't-care
1484 1 = abort: not a linear update (merge or update --check to force update)
1484 1 = abort: not a linear update (merge or update --check to force update)
1485 2 = abort: uncommitted changes (commit and merge, or update --clean to
1485 2 = abort: uncommitted changes (commit and merge, or update --clean to
1486 discard changes)
1486 discard changes)
1487 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1487 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1488 4 = abort: uncommitted changes (checked in commands.py)
1488 4 = abort: uncommitted changes (checked in commands.py)
1489 5 = incompatible options (checked in commands.py)
1489 5 = incompatible options (checked in commands.py)
1490
1490
1491 Return the same tuple as applyupdates().
1491 Return the same tuple as applyupdates().
1492 """
1492 """
1493
1493
1494 # This functon used to find the default destination if node was None, but
1494 # This functon used to find the default destination if node was None, but
1495 # that's now in destutil.py.
1495 # that's now in destutil.py.
1496 assert node is not None
1496 assert node is not None
1497 # If we're doing a partial update, we need to skip updating
1497 # If we're doing a partial update, we need to skip updating
1498 # the dirstate, so make a note of any partial-ness to the
1498 # the dirstate, so make a note of any partial-ness to the
1499 # update here.
1499 # update here.
1500 if matcher is None or matcher.always():
1500 if matcher is None or matcher.always():
1501 partial = False
1501 partial = False
1502 else:
1502 else:
1503 partial = True
1503 partial = True
1504 with repo.wlock():
1504 with repo.wlock():
1505 wc = repo[None]
1505 wc = repo[None]
1506 pl = wc.parents()
1506 pl = wc.parents()
1507 p1 = pl[0]
1507 p1 = pl[0]
1508 pas = [None]
1508 pas = [None]
1509 if ancestor is not None:
1509 if ancestor is not None:
1510 pas = [repo[ancestor]]
1510 pas = [repo[ancestor]]
1511
1511
1512 overwrite = force and not branchmerge
1512 overwrite = force and not branchmerge
1513
1513
1514 p2 = repo[node]
1514 p2 = repo[node]
1515 if pas[0] is None:
1515 if pas[0] is None:
1516 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1516 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1517 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1517 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1518 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1518 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1519 else:
1519 else:
1520 pas = [p1.ancestor(p2, warn=branchmerge)]
1520 pas = [p1.ancestor(p2, warn=branchmerge)]
1521
1521
1522 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1522 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1523
1523
1524 ### check phase
1524 ### check phase
1525 if not overwrite:
1525 if not overwrite:
1526 if len(pl) > 1:
1526 if len(pl) > 1:
1527 raise error.Abort(_("outstanding uncommitted merge"))
1527 raise error.Abort(_("outstanding uncommitted merge"))
1528 ms = mergestate.read(repo)
1528 ms = mergestate.read(repo)
1529 if list(ms.unresolved()):
1529 if list(ms.unresolved()):
1530 raise error.Abort(_("outstanding merge conflicts"))
1530 raise error.Abort(_("outstanding merge conflicts"))
1531 if branchmerge:
1531 if branchmerge:
1532 if pas == [p2]:
1532 if pas == [p2]:
1533 raise error.Abort(_("merging with a working directory ancestor"
1533 raise error.Abort(_("merging with a working directory ancestor"
1534 " has no effect"))
1534 " has no effect"))
1535 elif pas == [p1]:
1535 elif pas == [p1]:
1536 if not mergeancestor and p1.branch() == p2.branch():
1536 if not mergeancestor and p1.branch() == p2.branch():
1537 raise error.Abort(_("nothing to merge"),
1537 raise error.Abort(_("nothing to merge"),
1538 hint=_("use 'hg update' "
1538 hint=_("use 'hg update' "
1539 "or check 'hg heads'"))
1539 "or check 'hg heads'"))
1540 if not force and (wc.files() or wc.deleted()):
1540 if not force and (wc.files() or wc.deleted()):
1541 raise error.Abort(_("uncommitted changes"),
1541 raise error.Abort(_("uncommitted changes"),
1542 hint=_("use 'hg status' to list changes"))
1542 hint=_("use 'hg status' to list changes"))
1543 for s in sorted(wc.substate):
1543 for s in sorted(wc.substate):
1544 wc.sub(s).bailifchanged()
1544 wc.sub(s).bailifchanged()
1545
1545
1546 elif not overwrite:
1546 elif not overwrite:
1547 if p1 == p2: # no-op update
1547 if p1 == p2: # no-op update
1548 # call the hooks and exit early
1548 # call the hooks and exit early
1549 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1549 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1550 repo.hook('update', parent1=xp2, parent2='', error=0)
1550 repo.hook('update', parent1=xp2, parent2='', error=0)
1551 return 0, 0, 0, 0
1551 return 0, 0, 0, 0
1552
1552
1553 if pas not in ([p1], [p2]): # nonlinear
1553 if pas not in ([p1], [p2]): # nonlinear
1554 dirty = wc.dirty(missing=True)
1554 dirty = wc.dirty(missing=True)
1555 if dirty:
1555 if dirty:
1556 # Branching is a bit strange to ensure we do the minimal
1556 # Branching is a bit strange to ensure we do the minimal
1557 # amount of call to obsolete.foreground.
1557 # amount of call to obsolete.foreground.
1558 foreground = obsolete.foreground(repo, [p1.node()])
1558 foreground = obsolete.foreground(repo, [p1.node()])
1559 # note: the <node> variable contains a random identifier
1559 # note: the <node> variable contains a random identifier
1560 if repo[node].node() in foreground:
1560 if repo[node].node() in foreground:
1561 pass # allow updating to successors
1561 pass # allow updating to successors
1562 else:
1562 else:
1563 msg = _("uncommitted changes")
1563 msg = _("uncommitted changes")
1564 hint = _("commit or update --clean to discard changes")
1564 hint = _("commit or update --clean to discard changes")
1565 raise error.Abort(msg, hint=hint)
1565 raise error.UpdateAbort(msg, hint=hint)
1566 else:
1566 else:
1567 # Allow jumping branches if clean and specific rev given
1567 # Allow jumping branches if clean and specific rev given
1568 pass
1568 pass
1569
1569
1570 if overwrite:
1570 if overwrite:
1571 pas = [wc]
1571 pas = [wc]
1572 elif not branchmerge:
1572 elif not branchmerge:
1573 pas = [p1]
1573 pas = [p1]
1574
1574
1575 # deprecated config: merge.followcopies
1575 # deprecated config: merge.followcopies
1576 followcopies = repo.ui.configbool('merge', 'followcopies', True)
1576 followcopies = repo.ui.configbool('merge', 'followcopies', True)
1577 if overwrite:
1577 if overwrite:
1578 followcopies = False
1578 followcopies = False
1579 elif not pas[0]:
1579 elif not pas[0]:
1580 followcopies = False
1580 followcopies = False
1581 if not branchmerge and not wc.dirty(missing=True):
1581 if not branchmerge and not wc.dirty(missing=True):
1582 followcopies = False
1582 followcopies = False
1583
1583
1584 ### calculate phase
1584 ### calculate phase
1585 actionbyfile, diverge, renamedelete = calculateupdates(
1585 actionbyfile, diverge, renamedelete = calculateupdates(
1586 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1586 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1587 followcopies, matcher=matcher, mergeforce=mergeforce)
1587 followcopies, matcher=matcher, mergeforce=mergeforce)
1588
1588
1589 # Prompt and create actions. Most of this is in the resolve phase
1589 # Prompt and create actions. Most of this is in the resolve phase
1590 # already, but we can't handle .hgsubstate in filemerge or
1590 # already, but we can't handle .hgsubstate in filemerge or
1591 # subrepo.submerge yet so we have to keep prompting for it.
1591 # subrepo.submerge yet so we have to keep prompting for it.
1592 if '.hgsubstate' in actionbyfile:
1592 if '.hgsubstate' in actionbyfile:
1593 f = '.hgsubstate'
1593 f = '.hgsubstate'
1594 m, args, msg = actionbyfile[f]
1594 m, args, msg = actionbyfile[f]
1595 prompts = filemerge.partextras(labels)
1595 prompts = filemerge.partextras(labels)
1596 prompts['f'] = f
1596 prompts['f'] = f
1597 if m == 'cd':
1597 if m == 'cd':
1598 if repo.ui.promptchoice(
1598 if repo.ui.promptchoice(
1599 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1599 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1600 "use (c)hanged version or (d)elete?"
1600 "use (c)hanged version or (d)elete?"
1601 "$$ &Changed $$ &Delete") % prompts, 0):
1601 "$$ &Changed $$ &Delete") % prompts, 0):
1602 actionbyfile[f] = ('r', None, "prompt delete")
1602 actionbyfile[f] = ('r', None, "prompt delete")
1603 elif f in p1:
1603 elif f in p1:
1604 actionbyfile[f] = ('am', None, "prompt keep")
1604 actionbyfile[f] = ('am', None, "prompt keep")
1605 else:
1605 else:
1606 actionbyfile[f] = ('a', None, "prompt keep")
1606 actionbyfile[f] = ('a', None, "prompt keep")
1607 elif m == 'dc':
1607 elif m == 'dc':
1608 f1, f2, fa, move, anc = args
1608 f1, f2, fa, move, anc = args
1609 flags = p2[f2].flags()
1609 flags = p2[f2].flags()
1610 if repo.ui.promptchoice(
1610 if repo.ui.promptchoice(
1611 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1611 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1612 "use (c)hanged version or leave (d)eleted?"
1612 "use (c)hanged version or leave (d)eleted?"
1613 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1613 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1614 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1614 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1615 else:
1615 else:
1616 del actionbyfile[f]
1616 del actionbyfile[f]
1617
1617
1618 # Convert to dictionary-of-lists format
1618 # Convert to dictionary-of-lists format
1619 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1619 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1620 for f, (m, args, msg) in actionbyfile.iteritems():
1620 for f, (m, args, msg) in actionbyfile.iteritems():
1621 if m not in actions:
1621 if m not in actions:
1622 actions[m] = []
1622 actions[m] = []
1623 actions[m].append((f, args, msg))
1623 actions[m].append((f, args, msg))
1624
1624
1625 if not util.fscasesensitive(repo.path):
1625 if not util.fscasesensitive(repo.path):
1626 # check collision between files only in p2 for clean update
1626 # check collision between files only in p2 for clean update
1627 if (not branchmerge and
1627 if (not branchmerge and
1628 (force or not wc.dirty(missing=True, branch=False))):
1628 (force or not wc.dirty(missing=True, branch=False))):
1629 _checkcollision(repo, p2.manifest(), None)
1629 _checkcollision(repo, p2.manifest(), None)
1630 else:
1630 else:
1631 _checkcollision(repo, wc.manifest(), actions)
1631 _checkcollision(repo, wc.manifest(), actions)
1632
1632
1633 # divergent renames
1633 # divergent renames
1634 for f, fl in sorted(diverge.iteritems()):
1634 for f, fl in sorted(diverge.iteritems()):
1635 repo.ui.warn(_("note: possible conflict - %s was renamed "
1635 repo.ui.warn(_("note: possible conflict - %s was renamed "
1636 "multiple times to:\n") % f)
1636 "multiple times to:\n") % f)
1637 for nf in fl:
1637 for nf in fl:
1638 repo.ui.warn(" %s\n" % nf)
1638 repo.ui.warn(" %s\n" % nf)
1639
1639
1640 # rename and delete
1640 # rename and delete
1641 for f, fl in sorted(renamedelete.iteritems()):
1641 for f, fl in sorted(renamedelete.iteritems()):
1642 repo.ui.warn(_("note: possible conflict - %s was deleted "
1642 repo.ui.warn(_("note: possible conflict - %s was deleted "
1643 "and renamed to:\n") % f)
1643 "and renamed to:\n") % f)
1644 for nf in fl:
1644 for nf in fl:
1645 repo.ui.warn(" %s\n" % nf)
1645 repo.ui.warn(" %s\n" % nf)
1646
1646
1647 ### apply phase
1647 ### apply phase
1648 if not branchmerge: # just jump to the new rev
1648 if not branchmerge: # just jump to the new rev
1649 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1649 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1650 if not partial:
1650 if not partial:
1651 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1651 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1652 # note that we're in the middle of an update
1652 # note that we're in the middle of an update
1653 repo.vfs.write('updatestate', p2.hex())
1653 repo.vfs.write('updatestate', p2.hex())
1654
1654
1655 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1655 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1656
1656
1657 if not partial:
1657 if not partial:
1658 repo.dirstate.beginparentchange()
1658 repo.dirstate.beginparentchange()
1659 repo.setparents(fp1, fp2)
1659 repo.setparents(fp1, fp2)
1660 recordupdates(repo, actions, branchmerge)
1660 recordupdates(repo, actions, branchmerge)
1661 # update completed, clear state
1661 # update completed, clear state
1662 util.unlink(repo.join('updatestate'))
1662 util.unlink(repo.join('updatestate'))
1663
1663
1664 if not branchmerge:
1664 if not branchmerge:
1665 repo.dirstate.setbranch(p2.branch())
1665 repo.dirstate.setbranch(p2.branch())
1666 repo.dirstate.endparentchange()
1666 repo.dirstate.endparentchange()
1667
1667
1668 if not partial:
1668 if not partial:
1669 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1669 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1670 return stats
1670 return stats
1671
1671
1672 def graft(repo, ctx, pctx, labels, keepparent=False):
1672 def graft(repo, ctx, pctx, labels, keepparent=False):
1673 """Do a graft-like merge.
1673 """Do a graft-like merge.
1674
1674
1675 This is a merge where the merge ancestor is chosen such that one
1675 This is a merge where the merge ancestor is chosen such that one
1676 or more changesets are grafted onto the current changeset. In
1676 or more changesets are grafted onto the current changeset. In
1677 addition to the merge, this fixes up the dirstate to include only
1677 addition to the merge, this fixes up the dirstate to include only
1678 a single parent (if keepparent is False) and tries to duplicate any
1678 a single parent (if keepparent is False) and tries to duplicate any
1679 renames/copies appropriately.
1679 renames/copies appropriately.
1680
1680
1681 ctx - changeset to rebase
1681 ctx - changeset to rebase
1682 pctx - merge base, usually ctx.p1()
1682 pctx - merge base, usually ctx.p1()
1683 labels - merge labels eg ['local', 'graft']
1683 labels - merge labels eg ['local', 'graft']
1684 keepparent - keep second parent if any
1684 keepparent - keep second parent if any
1685
1685
1686 """
1686 """
1687 # If we're grafting a descendant onto an ancestor, be sure to pass
1687 # If we're grafting a descendant onto an ancestor, be sure to pass
1688 # mergeancestor=True to update. This does two things: 1) allows the merge if
1688 # mergeancestor=True to update. This does two things: 1) allows the merge if
1689 # the destination is the same as the parent of the ctx (so we can use graft
1689 # the destination is the same as the parent of the ctx (so we can use graft
1690 # to copy commits), and 2) informs update that the incoming changes are
1690 # to copy commits), and 2) informs update that the incoming changes are
1691 # newer than the destination so it doesn't prompt about "remote changed foo
1691 # newer than the destination so it doesn't prompt about "remote changed foo
1692 # which local deleted".
1692 # which local deleted".
1693 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1693 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1694
1694
1695 stats = update(repo, ctx.node(), True, True, pctx.node(),
1695 stats = update(repo, ctx.node(), True, True, pctx.node(),
1696 mergeancestor=mergeancestor, labels=labels)
1696 mergeancestor=mergeancestor, labels=labels)
1697
1697
1698 pother = nullid
1698 pother = nullid
1699 parents = ctx.parents()
1699 parents = ctx.parents()
1700 if keepparent and len(parents) == 2 and pctx in parents:
1700 if keepparent and len(parents) == 2 and pctx in parents:
1701 parents.remove(pctx)
1701 parents.remove(pctx)
1702 pother = parents[0].node()
1702 pother = parents[0].node()
1703
1703
1704 repo.dirstate.beginparentchange()
1704 repo.dirstate.beginparentchange()
1705 repo.setparents(repo['.'].node(), pother)
1705 repo.setparents(repo['.'].node(), pother)
1706 repo.dirstate.write(repo.currenttransaction())
1706 repo.dirstate.write(repo.currenttransaction())
1707 # fix up dirstate for copies and renames
1707 # fix up dirstate for copies and renames
1708 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1708 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1709 repo.dirstate.endparentchange()
1709 repo.dirstate.endparentchange()
1710 return stats
1710 return stats
General Comments 0
You need to be logged in to leave comments. Login now