update: mention long options explicitly in description of merge.update()...
muxator
r34920:1856de4d stable
@@ -1,2038 +1,2038 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    extensions,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

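# Illustrative example (not part of the original file): an "F" record payload
# produced by _makerecords() below has the shape
#   filename\0state\0hash\0localpath\0ancpath\0ancnode\0otherpath\0othernode\0flags
# and _droponode() strips the next-to-last field (the "other" file node),
# which is the one piece of data the v1 format does not carry.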
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
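    # Illustrative example (not part of the original file): right after add()
    # records a conflict on "foo", the record list built by _makerecords()
    # might look roughly like
    #   [('L', '<40-char hex of the local node>'),
    #    ('O', '<40-char hex of the other node>'),
    #    ('F', 'foo\0u\0<hash>\0foo\0foo\0<anc filenode>\0foo\0<other filenode>\0'),
    #    ('f', 'foo\0ancestorlinknode\0<ancestor changeset node>')]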
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record's content to restore the serialized state from disk

        This function processes the "record" entries produced by
        deserializing the on-disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of (TYPE, data) records

        We read data from both the v1 and v2 files and decide which one to use.

        V1 was used by versions prior to 2.9.1 and contains less data than
        v2. We read both versions and check that no data in v2 contradicts
        v1. If there is no contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extra data in v2. If
        there is a contradiction we ignore the v2 content, as we assume an old
        version of Mercurial has overwritten the mergestate file and left an
        old v2 file around.

        returns list of records [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add placeholder "other" file node information
            # nobody is using it yet so we do not need to fetch the data
            # if mctx was wrong, `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records
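    # Illustrative example (not part of the original file): with the
    # [type][length][content] layout above, the ('L', <40-char hex node>)
    # record is serialized as the byte 'L', the big-endian length
    # '\x00\x00\x00\x28' (40 bytes of payload), and then the 40 hex characters
    # themselves, i.e. _pack('>sI40s', 'L', 40, hexnode), mirroring what
    # _writerecordsv2() below does.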

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
            self._repo.vfs.exists(self.statepathv1) or \
            self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == 'd':
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(('D', '\0'.join([filename] + v)))
            elif v[0] in ('pu', 'pr'):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(('P', '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(('C', '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append(('F', '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True
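        # Illustrative example (not part of the original file): after the call
        # above, self._state[fd] holds, in order: the merge state ('u'), the
        # hash naming the local backup in .hg/merge, the local path, the
        # ancestor path and file node, the other path and file node, and the
        # local file's flags.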

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config
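# Illustrative example (not part of the original file): the callers below read
# these settings from the [merge] section of an hgrc, e.g.
#   [merge]
#   checkunknown = warn
#   checkignored = abort
# where the accepted values are 'abort', 'ignore' and 'warn', as validated
# above.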

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))

def _checkunknowndirs(repo, f):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    # Check for path prefixes that exist as unknown files.
    for p in reversed(list(util.finddirs(f))):
        if (repo.wvfs.audit.check(p)
                and repo.wvfs.isfileorlink(p)
                and repo.dirstate.normalize(p) not in repo.dirstate):
            return p

    # Check if the file conflicts with a directory containing unknown files.
    if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
        # Does the directory contain any files that are not in the dirstate?
        for p, dirs, files in repo.wvfs.walk(f):
            for fn in files:
                relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
                if relf not in repo.dirstate:
                    return f
    return None
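# Illustrative example (not part of the original file): for f = 'a/b/c',
# util.finddirs() is used above to enumerate the ancestor directories 'a/b'
# and 'a'; the reversed() iteration checks the shortest prefix first, so the
# shortest conflicting path is the one returned, matching the docstring.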

def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif f not in wctx:
                    path = _checkunknowndirs(repo, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions
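# Illustrative example (not part of the original file): on a plain (non-merge)
# update where 'foo' was deleted in the working directory and does not exist
# in the target revision, the function above returns
#   {'foo': ('f', None, "forget deleted")}
# i.e. the dirstate should simply forget the file.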

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
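# Illustrative example (not part of the original file): if the provisional
# merged manifest would contain both 'README' and 'readme', util.normcase()
# maps them to the same key and the first check above aborts, since the two
# names cannot coexist on a case-insensitive filesystem.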

def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in util.finddirs(f):
            if p in dirs:
                yield f, p
                break

def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in ('c', 'dc', 'm', 'cm'):
            # This action may create a new local file.
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
            for p in util.finddirs(f):
                if p in mf:
                    if p in mctx:
                        # The file is in a directory which aliases both a local
                        # and a remote file. This is an internal inconsistency
                        # within the remote manifest.
                        invalidconflicts.add(p)
                    else:
                        # The file is in a directory which aliases a local file.
                        # We will need to rename the local file.
                        localconflicts.add(p)
                if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
                    # The file is in a directory which aliases a remote file.
                    # This is an internal inconsistency within the remote
                    # manifest.
                    invalidconflicts.add(p)

        # Track the names of all deleted files.
        if m == 'r':
            deletedfiles.add(f)
        if m == 'm':
            f1, f2, fa, move, anc = args
            if move:
930 if move:
931 deletedfiles.add(f1)
931 deletedfiles.add(f1)
932 if m == 'dm':
932 if m == 'dm':
933 f2, flags = args
933 f2, flags = args
934 deletedfiles.add(f2)
934 deletedfiles.add(f2)
935
935
936 # Rename all local conflicting files that have not been deleted.
936 # Rename all local conflicting files that have not been deleted.
937 for p in localconflicts:
937 for p in localconflicts:
938 if p not in deletedfiles:
938 if p not in deletedfiles:
939 ctxname = str(wctx).rstrip('+')
939 ctxname = str(wctx).rstrip('+')
940 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
940 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
941 actions[pnew] = ('pr', (p,), "local path conflict")
941 actions[pnew] = ('pr', (p,), "local path conflict")
942 actions[p] = ('p', (pnew, 'l'), "path conflict")
942 actions[p] = ('p', (pnew, 'l'), "path conflict")
943
943
944 if remoteconflicts:
944 if remoteconflicts:
945 # Check if all files in the conflicting directories have been removed.
945 # Check if all files in the conflicting directories have been removed.
946 ctxname = str(mctx).rstrip('+')
946 ctxname = str(mctx).rstrip('+')
947 for f, p in _filesindirs(repo, mf, remoteconflicts):
947 for f, p in _filesindirs(repo, mf, remoteconflicts):
948 if f not in deletedfiles:
948 if f not in deletedfiles:
949 m, args, msg = actions[p]
949 m, args, msg = actions[p]
950 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
950 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
951 if m in ('dc', 'm'):
951 if m in ('dc', 'm'):
952 # Action was merge, just update target.
952 # Action was merge, just update target.
953 actions[pnew] = (m, args, msg)
953 actions[pnew] = (m, args, msg)
954 else:
954 else:
955 # Action was create, change to renamed get action.
955 # Action was create, change to renamed get action.
956 fl = args[0]
956 fl = args[0]
957 actions[pnew] = ('dg', (p, fl), "remote path conflict")
957 actions[pnew] = ('dg', (p, fl), "remote path conflict")
958 actions[p] = ('p', (pnew, 'r'), "path conflict")
958 actions[p] = ('p', (pnew, 'r'), "path conflict")
959 remoteconflicts.remove(p)
959 remoteconflicts.remove(p)
960 break
960 break
961
961
962 if invalidconflicts:
962 if invalidconflicts:
963 for p in invalidconflicts:
963 for p in invalidconflicts:
964 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
964 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
965 raise error.Abort(_("destination manifest contains path conflicts"))
965 raise error.Abort(_("destination manifest contains path conflicts"))
966
966
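# The core question checkpathconflicts() answers for each new file is "does this
# path collide with an existing file or directory?". A minimal sketch over a
# plain set of tracked paths; `pathconflicts` is a hypothetical name and the
# real code also consults the remote manifest and the action table.
def pathconflicts(newfile, tracked):
    """Return the tracked paths that conflict with creating newfile."""
    conflicts = set()
    # newfile aliases an existing directory if any tracked file lives under it
    conflicts.update(p for p in tracked if p.startswith(newfile + '/'))
    # newfile is unreachable if one of its parent directories is a tracked file
    parts = newfile.split('/')[:-1]
    for i in range(len(parts), 0, -1):
        parent = '/'.join(parts[:i])
        if parent in tracked:
            conflicts.add(parent)
    return conflicts

# e.g. pathconflicts('a/b', {'a/b/c'}) == {'a/b/c'}   # file vs. existing directory
#      pathconflicts('a/b/c', {'a/b'}) == {'a/b'}     # directory vs. existing file
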
967 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
967 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
968 acceptremote, followcopies, forcefulldiff=False):
968 acceptremote, followcopies, forcefulldiff=False):
969 """
969 """
970 Merge wctx and p2 with ancestor pa and generate merge action list
970 Merge wctx and p2 with ancestor pa and generate merge action list
971
971
972 branchmerge and force are as passed in to update
972 branchmerge and force are as passed in to update
973 matcher = matcher to filter file lists
973 matcher = matcher to filter file lists
974 acceptremote = accept the incoming changes without prompting
974 acceptremote = accept the incoming changes without prompting
975 """
975 """
976 if matcher is not None and matcher.always():
976 if matcher is not None and matcher.always():
977 matcher = None
977 matcher = None
978
978
979 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
979 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
980
980
981 # manifests fetched in order are going to be faster, so prime the caches
981 # manifests fetched in order are going to be faster, so prime the caches
982 [x.manifest() for x in
982 [x.manifest() for x in
983 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
983 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
984
984
985 if followcopies:
985 if followcopies:
986 ret = copies.mergecopies(repo, wctx, p2, pa)
986 ret = copies.mergecopies(repo, wctx, p2, pa)
987 copy, movewithdir, diverge, renamedelete, dirmove = ret
987 copy, movewithdir, diverge, renamedelete, dirmove = ret
988
988
989 boolbm = pycompat.bytestr(bool(branchmerge))
989 boolbm = pycompat.bytestr(bool(branchmerge))
990 boolf = pycompat.bytestr(bool(force))
990 boolf = pycompat.bytestr(bool(force))
991 boolm = pycompat.bytestr(bool(matcher))
991 boolm = pycompat.bytestr(bool(matcher))
992 repo.ui.note(_("resolving manifests\n"))
992 repo.ui.note(_("resolving manifests\n"))
993 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
993 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
994 % (boolbm, boolf, boolm))
994 % (boolbm, boolf, boolm))
995 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
995 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
996
996
997 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
997 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
998 copied = set(copy.values())
998 copied = set(copy.values())
999 copied.update(movewithdir.values())
999 copied.update(movewithdir.values())
1000
1000
1001 if '.hgsubstate' in m1:
1001 if '.hgsubstate' in m1:
1002 # check whether sub state is modified
1002 # check whether sub state is modified
1003 if any(wctx.sub(s).dirty() for s in wctx.substate):
1003 if any(wctx.sub(s).dirty() for s in wctx.substate):
1004 m1['.hgsubstate'] = modifiednodeid
1004 m1['.hgsubstate'] = modifiednodeid
1005
1005
1006 # Don't use m2-vs-ma optimization if:
1006 # Don't use m2-vs-ma optimization if:
1007 # - ma is the same as m1 or m2, which we're just going to diff again later
1007 # - ma is the same as m1 or m2, which we're just going to diff again later
1008 # - The caller specifically asks for a full diff, which is useful during bid
1008 # - The caller specifically asks for a full diff, which is useful during bid
1009 # merge.
1009 # merge.
1010 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1010 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1011 # Identify which files are relevant to the merge, so we can limit the
1011 # Identify which files are relevant to the merge, so we can limit the
1012 # total m1-vs-m2 diff to just those files. This has significant
1012 # total m1-vs-m2 diff to just those files. This has significant
1013 # performance benefits in large repositories.
1013 # performance benefits in large repositories.
1014 relevantfiles = set(ma.diff(m2).keys())
1014 relevantfiles = set(ma.diff(m2).keys())
1015
1015
1016 # For copied and moved files, we need to add the source file too.
1016 # For copied and moved files, we need to add the source file too.
1017 for copykey, copyvalue in copy.iteritems():
1017 for copykey, copyvalue in copy.iteritems():
1018 if copyvalue in relevantfiles:
1018 if copyvalue in relevantfiles:
1019 relevantfiles.add(copykey)
1019 relevantfiles.add(copykey)
1020 for movedirkey in movewithdir:
1020 for movedirkey in movewithdir:
1021 relevantfiles.add(movedirkey)
1021 relevantfiles.add(movedirkey)
1022 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1022 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1023 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1023 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1024
1024
1025 diff = m1.diff(m2, match=matcher)
1025 diff = m1.diff(m2, match=matcher)
1026
1026
1027 if matcher is None:
1027 if matcher is None:
1028 matcher = matchmod.always('', '')
1028 matcher = matchmod.always('', '')
1029
1029
1030 actions = {}
1030 actions = {}
1031 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1031 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1032 if n1 and n2: # file exists on both local and remote side
1032 if n1 and n2: # file exists on both local and remote side
1033 if f not in ma:
1033 if f not in ma:
1034 fa = copy.get(f, None)
1034 fa = copy.get(f, None)
1035 if fa is not None:
1035 if fa is not None:
1036 actions[f] = ('m', (f, f, fa, False, pa.node()),
1036 actions[f] = ('m', (f, f, fa, False, pa.node()),
1037 "both renamed from " + fa)
1037 "both renamed from " + fa)
1038 else:
1038 else:
1039 actions[f] = ('m', (f, f, None, False, pa.node()),
1039 actions[f] = ('m', (f, f, None, False, pa.node()),
1040 "both created")
1040 "both created")
1041 else:
1041 else:
1042 a = ma[f]
1042 a = ma[f]
1043 fla = ma.flags(f)
1043 fla = ma.flags(f)
1044 nol = 'l' not in fl1 + fl2 + fla
1044 nol = 'l' not in fl1 + fl2 + fla
1045 if n2 == a and fl2 == fla:
1045 if n2 == a and fl2 == fla:
1046 actions[f] = ('k', (), "remote unchanged")
1046 actions[f] = ('k', (), "remote unchanged")
1047 elif n1 == a and fl1 == fla: # local unchanged - use remote
1047 elif n1 == a and fl1 == fla: # local unchanged - use remote
1048 if n1 == n2: # optimization: keep local content
1048 if n1 == n2: # optimization: keep local content
1049 actions[f] = ('e', (fl2,), "update permissions")
1049 actions[f] = ('e', (fl2,), "update permissions")
1050 else:
1050 else:
1051 actions[f] = ('g', (fl2, False), "remote is newer")
1051 actions[f] = ('g', (fl2, False), "remote is newer")
1052 elif nol and n2 == a: # remote only changed 'x'
1052 elif nol and n2 == a: # remote only changed 'x'
1053 actions[f] = ('e', (fl2,), "update permissions")
1053 actions[f] = ('e', (fl2,), "update permissions")
1054 elif nol and n1 == a: # local only changed 'x'
1054 elif nol and n1 == a: # local only changed 'x'
1055 actions[f] = ('g', (fl1, False), "remote is newer")
1055 actions[f] = ('g', (fl1, False), "remote is newer")
1056 else: # both changed something
1056 else: # both changed something
1057 actions[f] = ('m', (f, f, f, False, pa.node()),
1057 actions[f] = ('m', (f, f, f, False, pa.node()),
1058 "versions differ")
1058 "versions differ")
1059 elif n1: # file exists only on local side
1059 elif n1: # file exists only on local side
1060 if f in copied:
1060 if f in copied:
1061 pass # we'll deal with it on m2 side
1061 pass # we'll deal with it on m2 side
1062 elif f in movewithdir: # directory rename, move local
1062 elif f in movewithdir: # directory rename, move local
1063 f2 = movewithdir[f]
1063 f2 = movewithdir[f]
1064 if f2 in m2:
1064 if f2 in m2:
1065 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1065 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1066 "remote directory rename, both created")
1066 "remote directory rename, both created")
1067 else:
1067 else:
1068 actions[f2] = ('dm', (f, fl1),
1068 actions[f2] = ('dm', (f, fl1),
1069 "remote directory rename - move from " + f)
1069 "remote directory rename - move from " + f)
1070 elif f in copy:
1070 elif f in copy:
1071 f2 = copy[f]
1071 f2 = copy[f]
1072 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1072 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1073 "local copied/moved from " + f2)
1073 "local copied/moved from " + f2)
1074 elif f in ma: # present locally and in ancestor (clean or changed), no remote
1074 elif f in ma: # present locally and in ancestor (clean or changed), no remote
1075 if n1 != ma[f]:
1075 if n1 != ma[f]:
1076 if acceptremote:
1076 if acceptremote:
1077 actions[f] = ('r', None, "remote delete")
1077 actions[f] = ('r', None, "remote delete")
1078 else:
1078 else:
1079 actions[f] = ('cd', (f, None, f, False, pa.node()),
1079 actions[f] = ('cd', (f, None, f, False, pa.node()),
1080 "prompt changed/deleted")
1080 "prompt changed/deleted")
1081 elif n1 == addednodeid:
1081 elif n1 == addednodeid:
1082 # This extra 'a' is added by working copy manifest to mark
1082 # This extra 'a' is added by working copy manifest to mark
1083 # the file as locally added. We should forget it instead of
1083 # the file as locally added. We should forget it instead of
1084 # deleting it.
1084 # deleting it.
1085 actions[f] = ('f', None, "remote deleted")
1085 actions[f] = ('f', None, "remote deleted")
1086 else:
1086 else:
1087 actions[f] = ('r', None, "other deleted")
1087 actions[f] = ('r', None, "other deleted")
1088 elif n2: # file exists only on remote side
1088 elif n2: # file exists only on remote side
1089 if f in copied:
1089 if f in copied:
1090 pass # we'll deal with it on m1 side
1090 pass # we'll deal with it on m1 side
1091 elif f in movewithdir:
1091 elif f in movewithdir:
1092 f2 = movewithdir[f]
1092 f2 = movewithdir[f]
1093 if f2 in m1:
1093 if f2 in m1:
1094 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1094 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1095 "local directory rename, both created")
1095 "local directory rename, both created")
1096 else:
1096 else:
1097 actions[f2] = ('dg', (f, fl2),
1097 actions[f2] = ('dg', (f, fl2),
1098 "local directory rename - get from " + f)
1098 "local directory rename - get from " + f)
1099 elif f in copy:
1099 elif f in copy:
1100 f2 = copy[f]
1100 f2 = copy[f]
1101 if f2 in m2:
1101 if f2 in m2:
1102 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1102 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1103 "remote copied from " + f2)
1103 "remote copied from " + f2)
1104 else:
1104 else:
1105 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1105 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1106 "remote moved from " + f2)
1106 "remote moved from " + f2)
1107 elif f not in ma:
1107 elif f not in ma:
1108 # local unknown, remote created: the logic is described by the
1108 # local unknown, remote created: the logic is described by the
1109 # following table:
1109 # following table:
1110 #
1110 #
1111 # force branchmerge different | action
1111 # force branchmerge different | action
1112 # n * * | create
1112 # n * * | create
1113 # y n * | create
1113 # y n * | create
1114 # y y n | create
1114 # y y n | create
1115 # y y y | merge
1115 # y y y | merge
1116 #
1116 #
1117 # Checking whether the files are different is expensive, so we
1117 # Checking whether the files are different is expensive, so we
1118 # don't do that when we can avoid it.
1118 # don't do that when we can avoid it.
1119 if not force:
1119 if not force:
1120 actions[f] = ('c', (fl2,), "remote created")
1120 actions[f] = ('c', (fl2,), "remote created")
1121 elif not branchmerge:
1121 elif not branchmerge:
1122 actions[f] = ('c', (fl2,), "remote created")
1122 actions[f] = ('c', (fl2,), "remote created")
1123 else:
1123 else:
1124 actions[f] = ('cm', (fl2, pa.node()),
1124 actions[f] = ('cm', (fl2, pa.node()),
1125 "remote created, get or merge")
1125 "remote created, get or merge")
1126 elif n2 != ma[f]:
1126 elif n2 != ma[f]:
1127 df = None
1127 df = None
1128 for d in dirmove:
1128 for d in dirmove:
1129 if f.startswith(d):
1129 if f.startswith(d):
1130 # new file added in a directory that was moved
1130 # new file added in a directory that was moved
1131 df = dirmove[d] + f[len(d):]
1131 df = dirmove[d] + f[len(d):]
1132 break
1132 break
1133 if df is not None and df in m1:
1133 if df is not None and df in m1:
1134 actions[df] = ('m', (df, f, f, False, pa.node()),
1134 actions[df] = ('m', (df, f, f, False, pa.node()),
1135 "local directory rename - respect move from " + f)
1135 "local directory rename - respect move from " + f)
1136 elif acceptremote:
1136 elif acceptremote:
1137 actions[f] = ('c', (fl2,), "remote recreating")
1137 actions[f] = ('c', (fl2,), "remote recreating")
1138 else:
1138 else:
1139 actions[f] = ('dc', (None, f, f, False, pa.node()),
1139 actions[f] = ('dc', (None, f, f, False, pa.node()),
1140 "prompt deleted/changed")
1140 "prompt deleted/changed")
1141
1141
1142 # If we are merging, look for path conflicts.
1142 # If we are merging, look for path conflicts.
1143 checkpathconflicts(repo, wctx, p2, actions)
1143 checkpathconflicts(repo, wctx, p2, actions)
1144
1144
1145 return actions, diverge, renamedelete
1145 return actions, diverge, renamedelete
1146
1146
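# The heart of manifestmerge() is a per-file three-way comparison of the local
# node (n1), remote node (n2) and ancestor node (a). A stripped-down sketch of
# the "file exists on both sides" branch, returning the action code the loop
# above would emit; `pickaction` is a hypothetical name.
def pickaction(n1, n2, a, fl1='', fl2='', fla=''):
    """Return the action code for a file present on both sides."""
    if a is None:
        return 'm'                       # not in the ancestor: both created
    nol = 'l' not in fl1 + fl2 + fla     # no symlink involved
    if n2 == a and fl2 == fla:
        return 'k'                       # remote unchanged: keep local
    if n1 == a and fl1 == fla:
        return 'e' if n1 == n2 else 'g'  # local unchanged: take remote
    if nol and n2 == a:
        return 'e'                       # remote only touched the exec bit
    if nol and n1 == a:
        return 'g'                       # local only touched the exec bit
    return 'm'                           # both sides changed: real merge

# e.g. pickaction('x1', 'x2', 'x1') == 'g'   (only the remote side changed)
#      pickaction('x1', 'x2', 'x0') == 'm'   (both sides changed)
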
1147 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1147 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1148 """Resolves false conflicts where the nodeid changed but the content
1148 """Resolves false conflicts where the nodeid changed but the content
1149 remained the same."""
1149 remained the same."""
1150
1150
1151 for f, (m, args, msg) in actions.items():
1151 for f, (m, args, msg) in actions.items():
1152 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1152 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1153 # local did change but ended up with same content
1153 # local did change but ended up with same content
1154 actions[f] = 'r', None, "prompt same"
1154 actions[f] = 'r', None, "prompt same"
1155 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1155 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1156 # remote did change but ended up with same content
1156 # remote did change but ended up with same content
1157 del actions[f] # don't get = keep local deleted
1157 del actions[f] # don't get = keep local deleted
1158
1158
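# A sketch of the idea behind _resolvetrivial(): a change/delete prompt can be
# dropped when the surviving side has the same *content* as the ancestor even
# though its nodeid differs. `resolvetrivial` and the plain data dicts are
# hypothetical stand-ins for the context objects used above.
def resolvetrivial(actions, localdata, remotedata, ancestordata):
    for f, (kind, args, msg) in list(actions.items()):
        if kind == 'cd' and localdata.get(f) == ancestordata.get(f):
            actions[f] = ('r', None, 'prompt same')   # the local change was a no-op
        elif kind == 'dc' and remotedata.get(f) == ancestordata.get(f):
            del actions[f]                            # keep the local deletion

# e.g. with actions = {'f.txt': ('dc', (), 'prompt deleted/changed')} and
# remotedata == ancestordata == {'f.txt': b'same'}, the entry is dropped.
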
1159 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1159 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1160 acceptremote, followcopies, matcher=None,
1160 acceptremote, followcopies, matcher=None,
1161 mergeforce=False):
1161 mergeforce=False):
1162 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1162 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1163 # Avoid cycle.
1163 # Avoid cycle.
1164 from . import sparse
1164 from . import sparse
1165
1165
1166 if len(ancestors) == 1: # default
1166 if len(ancestors) == 1: # default
1167 actions, diverge, renamedelete = manifestmerge(
1167 actions, diverge, renamedelete = manifestmerge(
1168 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1168 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1169 acceptremote, followcopies)
1169 acceptremote, followcopies)
1170 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1170 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1171
1171
1172 else: # only when merge.preferancestor=* - the default
1172 else: # only when merge.preferancestor=* - the default
1173 repo.ui.note(
1173 repo.ui.note(
1174 _("note: merging %s and %s using bids from ancestors %s\n") %
1174 _("note: merging %s and %s using bids from ancestors %s\n") %
1175 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1175 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1176 for anc in ancestors)))
1176 for anc in ancestors)))
1177
1177
1178 # Call for bids
1178 # Call for bids
1179 fbids = {} # mapping filename to bids (action method to list of actions)
1179 fbids = {} # mapping filename to bids (action method to list of actions)
1180 diverge, renamedelete = None, None
1180 diverge, renamedelete = None, None
1181 for ancestor in ancestors:
1181 for ancestor in ancestors:
1182 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1182 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1183 actions, diverge1, renamedelete1 = manifestmerge(
1183 actions, diverge1, renamedelete1 = manifestmerge(
1184 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1184 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1185 acceptremote, followcopies, forcefulldiff=True)
1185 acceptremote, followcopies, forcefulldiff=True)
1186 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1186 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1187
1187
1188 # Track the shortest set of warnings on the theory that bid
1188 # Track the shortest set of warnings on the theory that bid
1189 # merge will correctly incorporate more information
1189 # merge will correctly incorporate more information
1190 if diverge is None or len(diverge1) < len(diverge):
1190 if diverge is None or len(diverge1) < len(diverge):
1191 diverge = diverge1
1191 diverge = diverge1
1192 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1192 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1193 renamedelete = renamedelete1
1193 renamedelete = renamedelete1
1194
1194
1195 for f, a in sorted(actions.iteritems()):
1195 for f, a in sorted(actions.iteritems()):
1196 m, args, msg = a
1196 m, args, msg = a
1197 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1197 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1198 if f in fbids:
1198 if f in fbids:
1199 d = fbids[f]
1199 d = fbids[f]
1200 if m in d:
1200 if m in d:
1201 d[m].append(a)
1201 d[m].append(a)
1202 else:
1202 else:
1203 d[m] = [a]
1203 d[m] = [a]
1204 else:
1204 else:
1205 fbids[f] = {m: [a]}
1205 fbids[f] = {m: [a]}
1206
1206
1207 # Pick the best bid for each file
1207 # Pick the best bid for each file
1208 repo.ui.note(_('\nauction for merging merge bids\n'))
1208 repo.ui.note(_('\nauction for merging merge bids\n'))
1209 actions = {}
1209 actions = {}
1210 dms = [] # filenames that have dm actions
1210 dms = [] # filenames that have dm actions
1211 for f, bids in sorted(fbids.items()):
1211 for f, bids in sorted(fbids.items()):
1212 # bids is a mapping from action method to list of actions
1212 # bids is a mapping from action method to list of actions
1213 # Consensus?
1213 # Consensus?
1214 if len(bids) == 1: # all bids are the same kind of method
1214 if len(bids) == 1: # all bids are the same kind of method
1215 m, l = list(bids.items())[0]
1215 m, l = list(bids.items())[0]
1216 if all(a == l[0] for a in l[1:]): # len(l) may be > 1
1216 if all(a == l[0] for a in l[1:]): # len(l) may be > 1
1217 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1217 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1218 actions[f] = l[0]
1218 actions[f] = l[0]
1219 if m == 'dm':
1219 if m == 'dm':
1220 dms.append(f)
1220 dms.append(f)
1221 continue
1221 continue
1222 # If keep is an option, just do it.
1222 # If keep is an option, just do it.
1223 if 'k' in bids:
1223 if 'k' in bids:
1224 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1224 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1225 actions[f] = bids['k'][0]
1225 actions[f] = bids['k'][0]
1226 continue
1226 continue
1227 # If there are gets and they all agree [how could they not?], do it.
1227 # If there are gets and they all agree [how could they not?], do it.
1228 if 'g' in bids:
1228 if 'g' in bids:
1229 ga0 = bids['g'][0]
1229 ga0 = bids['g'][0]
1230 if all(a == ga0 for a in bids['g'][1:]):
1230 if all(a == ga0 for a in bids['g'][1:]):
1231 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1231 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1232 actions[f] = ga0
1232 actions[f] = ga0
1233 continue
1233 continue
1234 # TODO: Consider other simple actions such as mode changes
1234 # TODO: Consider other simple actions such as mode changes
1235 # Handle inefficient democrazy.
1235 # Handle inefficient democrazy.
1236 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1236 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1237 for m, l in sorted(bids.items()):
1237 for m, l in sorted(bids.items()):
1238 for _f, args, msg in l:
1238 for _f, args, msg in l:
1239 repo.ui.note(' %s -> %s\n' % (msg, m))
1239 repo.ui.note(' %s -> %s\n' % (msg, m))
1240 # Pick random action. TODO: Instead, prompt user when resolving
1240 # Pick random action. TODO: Instead, prompt user when resolving
1241 m, l = list(bids.items())[0]
1241 m, l = list(bids.items())[0]
1242 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1242 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1243 (f, m))
1243 (f, m))
1244 actions[f] = l[0]
1244 actions[f] = l[0]
1245 if m == 'dm':
1245 if m == 'dm':
1246 dms.append(f)
1246 dms.append(f)
1247 continue
1247 continue
1248 # Work around 'dm' that can cause multiple actions for the same file
1248 # Work around 'dm' that can cause multiple actions for the same file
1249 for f in dms:
1249 for f in dms:
1250 dm, (f0, flags), msg = actions[f]
1250 dm, (f0, flags), msg = actions[f]
1251 assert dm == 'dm', dm
1251 assert dm == 'dm', dm
1252 if f0 in actions and actions[f0][0] == 'r':
1252 if f0 in actions and actions[f0][0] == 'r':
1253 # We have one bid for removing a file and another for moving it.
1253 # We have one bid for removing a file and another for moving it.
1254 # These two could be merged as first move and then delete ...
1254 # These two could be merged as first move and then delete ...
1255 # but instead drop moving and just delete.
1255 # but instead drop moving and just delete.
1256 del actions[f]
1256 del actions[f]
1257 repo.ui.note(_('end of auction\n\n'))
1257 repo.ui.note(_('end of auction\n\n'))
1258
1258
1259 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1259 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1260
1260
1261 if wctx.rev() is None:
1261 if wctx.rev() is None:
1262 fractions = _forgetremoved(wctx, mctx, branchmerge)
1262 fractions = _forgetremoved(wctx, mctx, branchmerge)
1263 actions.update(fractions)
1263 actions.update(fractions)
1264
1264
1265 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1265 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1266 actions)
1266 actions)
1267
1267
1268 return prunedactions, diverge, renamedelete
1268 return prunedactions, diverge, renamedelete
1269
1269
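# The bid-merge auction above picks one action per file from the bids produced
# for each ancestor. A minimal sketch of that decision order (consensus, then
# 'keep', then an agreed 'get', then an arbitrary pick); `auction` is a
# hypothetical name and `bids` maps an action code to a list of candidate actions.
def auction(bids):
    """Return the winning action from a per-file bid dictionary."""
    if len(bids) == 1:                       # every ancestor proposed the same kind
        code, candidates = next(iter(bids.items()))
        if all(a == candidates[0] for a in candidates[1:]):
            return candidates[0]             # full consensus
    if 'k' in bids:                          # keeping the local file is always safe
        return bids['k'][0]
    if 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
        return bids['g'][0]                  # all 'get' bids agree
    code = sorted(bids)[0]                   # ambiguous: fall back to some bid
    return bids[code][0]

# e.g. auction({'k': [('k', (), 'keep')], 'g': [('g', ('', False), 'get')]})
# returns the 'keep' bid.
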
1270 def _getcwd():
1270 def _getcwd():
1271 try:
1271 try:
1272 return pycompat.getcwd()
1272 return pycompat.getcwd()
1273 except OSError as err:
1273 except OSError as err:
1274 if err.errno == errno.ENOENT:
1274 if err.errno == errno.ENOENT:
1275 return None
1275 return None
1276 raise
1276 raise
1277
1277
1278 def batchremove(repo, wctx, actions):
1278 def batchremove(repo, wctx, actions):
1279 """apply removes to the working directory
1279 """apply removes to the working directory
1280
1280
1281 yields tuples for progress updates
1281 yields tuples for progress updates
1282 """
1282 """
1283 verbose = repo.ui.verbose
1283 verbose = repo.ui.verbose
1284 cwd = _getcwd()
1284 cwd = _getcwd()
1285 i = 0
1285 i = 0
1286 for f, args, msg in actions:
1286 for f, args, msg in actions:
1287 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1287 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1288 if verbose:
1288 if verbose:
1289 repo.ui.note(_("removing %s\n") % f)
1289 repo.ui.note(_("removing %s\n") % f)
1290 wctx[f].audit()
1290 wctx[f].audit()
1291 try:
1291 try:
1292 wctx[f].remove(ignoremissing=True)
1292 wctx[f].remove(ignoremissing=True)
1293 except OSError as inst:
1293 except OSError as inst:
1294 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1294 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1295 (f, inst.strerror))
1295 (f, inst.strerror))
1296 if i == 100:
1296 if i == 100:
1297 yield i, f
1297 yield i, f
1298 i = 0
1298 i = 0
1299 i += 1
1299 i += 1
1300 if i > 0:
1300 if i > 0:
1301 yield i, f
1301 yield i, f
1302
1302
1303 if cwd and not _getcwd():
1303 if cwd and not _getcwd():
1304 # cwd was removed in the course of removing files; print a helpful
1304 # cwd was removed in the course of removing files; print a helpful
1305 # warning.
1305 # warning.
1306 repo.ui.warn(_("current directory was removed\n"
1306 repo.ui.warn(_("current directory was removed\n"
1307 "(consider changing to repo root: %s)\n") % repo.root)
1307 "(consider changing to repo root: %s)\n") % repo.root)
1308
1308
1309 # It's necessary to flush here in case we're inside a worker fork and will
1309 # It's necessary to flush here in case we're inside a worker fork and will
1310 # quit after this function.
1310 # quit after this function.
1311 wctx.flushall()
1311 wctx.flushall()
1312
1312
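# batchremove() and batchget() report progress in chunks rather than once per
# file, yielding a (count, last file) pair roughly every 100 items so the caller
# can advance the progress bar cheaply. A self-contained sketch of that batching
# pattern; `batched` and `process` are hypothetical names.
def batched(items, process, chunk=100):
    """Apply process() to every item, yielding (count, item) progress updates."""
    i = 0
    item = None
    for item in items:
        process(item)
        if i == chunk:        # flush a progress update and restart the counter
            yield i, item
            i = 0
        i += 1
    if i > 0:                 # report the final, partial chunk
        yield i, item

# e.g. sum(n for n, _ in batched(range(250), lambda x: None)) == 250
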
1313 def batchget(repo, mctx, wctx, actions):
1313 def batchget(repo, mctx, wctx, actions):
1314 """apply gets to the working directory
1314 """apply gets to the working directory
1315
1315
1316 mctx is the context to get from
1316 mctx is the context to get from
1317
1317
1318 yields tuples for progress updates
1318 yields tuples for progress updates
1319 """
1319 """
1320 verbose = repo.ui.verbose
1320 verbose = repo.ui.verbose
1321 fctx = mctx.filectx
1321 fctx = mctx.filectx
1322 ui = repo.ui
1322 ui = repo.ui
1323 i = 0
1323 i = 0
1324 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1324 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1325 for f, (flags, backup), msg in actions:
1325 for f, (flags, backup), msg in actions:
1326 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1326 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1327 if verbose:
1327 if verbose:
1328 repo.ui.note(_("getting %s\n") % f)
1328 repo.ui.note(_("getting %s\n") % f)
1329
1329
1330 if backup:
1330 if backup:
1331 # If a file or directory exists with the same name, back that
1331 # If a file or directory exists with the same name, back that
1332 # up. Otherwise, look to see if there is a file that conflicts
1332 # up. Otherwise, look to see if there is a file that conflicts
1333 # with a directory this file is in, and if so, back that up.
1333 # with a directory this file is in, and if so, back that up.
1334 absf = repo.wjoin(f)
1334 absf = repo.wjoin(f)
1335 if not repo.wvfs.lexists(f):
1335 if not repo.wvfs.lexists(f):
1336 for p in util.finddirs(f):
1336 for p in util.finddirs(f):
1337 if repo.wvfs.isfileorlink(p):
1337 if repo.wvfs.isfileorlink(p):
1338 absf = repo.wjoin(p)
1338 absf = repo.wjoin(p)
1339 break
1339 break
1340 orig = scmutil.origpath(ui, repo, absf)
1340 orig = scmutil.origpath(ui, repo, absf)
1341 if repo.wvfs.lexists(absf):
1341 if repo.wvfs.lexists(absf):
1342 util.rename(absf, orig)
1342 util.rename(absf, orig)
1343 wctx[f].clearunknown()
1343 wctx[f].clearunknown()
1344 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1344 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1345 if i == 100:
1345 if i == 100:
1346 yield i, f
1346 yield i, f
1347 i = 0
1347 i = 0
1348 i += 1
1348 i += 1
1349 if i > 0:
1349 if i > 0:
1350 yield i, f
1350 yield i, f
1351
1351
1352 # It's necessary to flush here in case we're inside a worker fork and will
1352 # It's necessary to flush here in case we're inside a worker fork and will
1353 # quit after this function.
1353 # quit after this function.
1354 wctx.flushall()
1354 wctx.flushall()
1355
1355
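# When a 'get' needs a backup, the loop above moves aside either the path itself
# (if something already exists there) or the deepest parent that is currently a
# file or symlink blocking the directory about to be created. A sketch over
# plain sets of paths; `backuptarget` is a hypothetical name and the real code
# checks the working directory through the vfs.
def backuptarget(path, existing, filelike):
    """Return which existing path should be moved aside before writing path."""
    if path in existing:
        return path
    parts = path.split('/')[:-1]
    for i in range(len(parts), 0, -1):        # deepest parent first
        parent = '/'.join(parts[:i])
        if parent in filelike:
            return parent
    return path                               # nothing is in the way

# e.g. backuptarget('a/b/c', existing=set(), filelike={'a/b'}) == 'a/b'
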
1356 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1356 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1357 """apply the merge action list to the working directory
1357 """apply the merge action list to the working directory
1358
1358
1359 wctx is the working copy context
1359 wctx is the working copy context
1360 mctx is the context to be merged into the working copy
1360 mctx is the context to be merged into the working copy
1361
1361
1362 Return a tuple of counts (updated, merged, removed, unresolved) that
1362 Return a tuple of counts (updated, merged, removed, unresolved) that
1363 describes how many files were affected by the update.
1363 describes how many files were affected by the update.
1364 """
1364 """
1365
1365
1366 updated, merged, removed = 0, 0, 0
1366 updated, merged, removed = 0, 0, 0
1367 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1367 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1368 moves = []
1368 moves = []
1369 for m, l in actions.items():
1369 for m, l in actions.items():
1370 l.sort()
1370 l.sort()
1371
1371
1372 # 'cd' and 'dc' actions are treated like other merge conflicts
1372 # 'cd' and 'dc' actions are treated like other merge conflicts
1373 mergeactions = sorted(actions['cd'])
1373 mergeactions = sorted(actions['cd'])
1374 mergeactions.extend(sorted(actions['dc']))
1374 mergeactions.extend(sorted(actions['dc']))
1375 mergeactions.extend(actions['m'])
1375 mergeactions.extend(actions['m'])
1376 for f, args, msg in mergeactions:
1376 for f, args, msg in mergeactions:
1377 f1, f2, fa, move, anc = args
1377 f1, f2, fa, move, anc = args
1378 if f == '.hgsubstate': # merged internally
1378 if f == '.hgsubstate': # merged internally
1379 continue
1379 continue
1380 if f1 is None:
1380 if f1 is None:
1381 fcl = filemerge.absentfilectx(wctx, fa)
1381 fcl = filemerge.absentfilectx(wctx, fa)
1382 else:
1382 else:
1383 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1383 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1384 fcl = wctx[f1]
1384 fcl = wctx[f1]
1385 if f2 is None:
1385 if f2 is None:
1386 fco = filemerge.absentfilectx(mctx, fa)
1386 fco = filemerge.absentfilectx(mctx, fa)
1387 else:
1387 else:
1388 fco = mctx[f2]
1388 fco = mctx[f2]
1389 actx = repo[anc]
1389 actx = repo[anc]
1390 if fa in actx:
1390 if fa in actx:
1391 fca = actx[fa]
1391 fca = actx[fa]
1392 else:
1392 else:
1393 # TODO: move to absentfilectx
1393 # TODO: move to absentfilectx
1394 fca = repo.filectx(f1, fileid=nullrev)
1394 fca = repo.filectx(f1, fileid=nullrev)
1395 ms.add(fcl, fco, fca, f)
1395 ms.add(fcl, fco, fca, f)
1396 if f1 != f and move:
1396 if f1 != f and move:
1397 moves.append(f1)
1397 moves.append(f1)
1398
1398
1399 _updating = _('updating')
1399 _updating = _('updating')
1400 _files = _('files')
1400 _files = _('files')
1401 progress = repo.ui.progress
1401 progress = repo.ui.progress
1402
1402
1403 # remove renamed files after safely stored
1403 # remove renamed files after safely stored
1404 for f in moves:
1404 for f in moves:
1405 if wctx[f].lexists():
1405 if wctx[f].lexists():
1406 repo.ui.debug("removing %s\n" % f)
1406 repo.ui.debug("removing %s\n" % f)
1407 wctx[f].audit()
1407 wctx[f].audit()
1408 wctx[f].remove()
1408 wctx[f].remove()
1409
1409
1410 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1410 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1411 z = 0
1411 z = 0
1412
1412
1413 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1413 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1414 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1414 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1415
1415
1416 # record path conflicts
1416 # record path conflicts
1417 for f, args, msg in actions['p']:
1417 for f, args, msg in actions['p']:
1418 f1, fo = args
1418 f1, fo = args
1419 s = repo.ui.status
1419 s = repo.ui.status
1420 s(_("%s: path conflict - a file or link has the same name as a "
1420 s(_("%s: path conflict - a file or link has the same name as a "
1421 "directory\n") % f)
1421 "directory\n") % f)
1422 if fo == 'l':
1422 if fo == 'l':
1423 s(_("the local file has been renamed to %s\n") % f1)
1423 s(_("the local file has been renamed to %s\n") % f1)
1424 else:
1424 else:
1425 s(_("the remote file has been renamed to %s\n") % f1)
1425 s(_("the remote file has been renamed to %s\n") % f1)
1426 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1426 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1427 ms.addpath(f, f1, fo)
1427 ms.addpath(f, f1, fo)
1428 z += 1
1428 z += 1
1429 progress(_updating, z, item=f, total=numupdates, unit=_files)
1429 progress(_updating, z, item=f, total=numupdates, unit=_files)
1430
1430
1431 # When merging in-memory, we can't support worker processes, so set the
1431 # When merging in-memory, we can't support worker processes, so set the
1432 # per-item cost at 0 in that case.
1432 # per-item cost at 0 in that case.
1433 cost = 0 if wctx.isinmemory() else 0.001
1433 cost = 0 if wctx.isinmemory() else 0.001
1434
1434
1435 # remove in parallel (must come before resolving path conflicts and getting)
1435 # remove in parallel (must come before resolving path conflicts and getting)
1436 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1436 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1437 actions['r'])
1437 actions['r'])
1438 for i, item in prog:
1438 for i, item in prog:
1439 z += i
1439 z += i
1440 progress(_updating, z, item=item, total=numupdates, unit=_files)
1440 progress(_updating, z, item=item, total=numupdates, unit=_files)
1441 removed = len(actions['r'])
1441 removed = len(actions['r'])
1442
1442
1443 # resolve path conflicts (must come before getting)
1443 # resolve path conflicts (must come before getting)
1444 for f, args, msg in actions['pr']:
1444 for f, args, msg in actions['pr']:
1445 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1445 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1446 f0, = args
1446 f0, = args
1447 if wctx[f0].lexists():
1447 if wctx[f0].lexists():
1448 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1448 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1449 wctx[f].audit()
1449 wctx[f].audit()
1450 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1450 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1451 wctx[f0].remove()
1451 wctx[f0].remove()
1452 z += 1
1452 z += 1
1453 progress(_updating, z, item=f, total=numupdates, unit=_files)
1453 progress(_updating, z, item=f, total=numupdates, unit=_files)
1454
1454
1455 # We should flush before forking into worker processes, since those workers
1455 # We should flush before forking into worker processes, since those workers
1456 # flush when they complete, and we don't want to duplicate work.
1456 # flush when they complete, and we don't want to duplicate work.
1457 wctx.flushall()
1457 wctx.flushall()
1458
1458
1459 # get in parallel
1459 # get in parallel
1460 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1460 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1461 actions['g'])
1461 actions['g'])
1462 for i, item in prog:
1462 for i, item in prog:
1463 z += i
1463 z += i
1464 progress(_updating, z, item=item, total=numupdates, unit=_files)
1464 progress(_updating, z, item=item, total=numupdates, unit=_files)
1465 updated = len(actions['g'])
1465 updated = len(actions['g'])
1466
1466
1467 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1467 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1468 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1468 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1469
1469
1470 # forget (manifest only, just log it) (must come first)
1470 # forget (manifest only, just log it) (must come first)
1471 for f, args, msg in actions['f']:
1471 for f, args, msg in actions['f']:
1472 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1472 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1473 z += 1
1473 z += 1
1474 progress(_updating, z, item=f, total=numupdates, unit=_files)
1474 progress(_updating, z, item=f, total=numupdates, unit=_files)
1475
1475
1476 # re-add (manifest only, just log it)
1476 # re-add (manifest only, just log it)
1477 for f, args, msg in actions['a']:
1477 for f, args, msg in actions['a']:
1478 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1478 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1479 z += 1
1479 z += 1
1480 progress(_updating, z, item=f, total=numupdates, unit=_files)
1480 progress(_updating, z, item=f, total=numupdates, unit=_files)
1481
1481
1482 # re-add/mark as modified (manifest only, just log it)
1482 # re-add/mark as modified (manifest only, just log it)
1483 for f, args, msg in actions['am']:
1483 for f, args, msg in actions['am']:
1484 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1484 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1485 z += 1
1485 z += 1
1486 progress(_updating, z, item=f, total=numupdates, unit=_files)
1486 progress(_updating, z, item=f, total=numupdates, unit=_files)
1487
1487
1488 # keep (noop, just log it)
1488 # keep (noop, just log it)
1489 for f, args, msg in actions['k']:
1489 for f, args, msg in actions['k']:
1490 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1490 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1491 # no progress
1491 # no progress
1492
1492
1493 # directory rename, move local
1493 # directory rename, move local
1494 for f, args, msg in actions['dm']:
1494 for f, args, msg in actions['dm']:
1495 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1495 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1496 z += 1
1496 z += 1
1497 progress(_updating, z, item=f, total=numupdates, unit=_files)
1497 progress(_updating, z, item=f, total=numupdates, unit=_files)
1498 f0, flags = args
1498 f0, flags = args
1499 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1499 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1500 wctx[f].audit()
1500 wctx[f].audit()
1501 wctx[f].write(wctx.filectx(f0).data(), flags)
1501 wctx[f].write(wctx.filectx(f0).data(), flags)
1502 wctx[f0].remove()
1502 wctx[f0].remove()
1503 updated += 1
1503 updated += 1
1504
1504
1505 # local directory rename, get
1505 # local directory rename, get
1506 for f, args, msg in actions['dg']:
1506 for f, args, msg in actions['dg']:
1507 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1507 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1508 z += 1
1508 z += 1
1509 progress(_updating, z, item=f, total=numupdates, unit=_files)
1509 progress(_updating, z, item=f, total=numupdates, unit=_files)
1510 f0, flags = args
1510 f0, flags = args
1511 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1511 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1512 wctx[f].write(mctx.filectx(f0).data(), flags)
1512 wctx[f].write(mctx.filectx(f0).data(), flags)
1513 updated += 1
1513 updated += 1
1514
1514
1515 # exec
1515 # exec
1516 for f, args, msg in actions['e']:
1516 for f, args, msg in actions['e']:
1517 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1517 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1518 z += 1
1518 z += 1
1519 progress(_updating, z, item=f, total=numupdates, unit=_files)
1519 progress(_updating, z, item=f, total=numupdates, unit=_files)
1520 flags, = args
1520 flags, = args
1521 wctx[f].audit()
1521 wctx[f].audit()
1522 wctx[f].setflags('l' in flags, 'x' in flags)
1522 wctx[f].setflags('l' in flags, 'x' in flags)
1523 updated += 1
1523 updated += 1
1524
1524
1525 # the ordering is important here -- ms.mergedriver will raise if the merge
1525 # the ordering is important here -- ms.mergedriver will raise if the merge
1526 # driver has changed, and we want to be able to bypass it when overwrite is
1526 # driver has changed, and we want to be able to bypass it when overwrite is
1527 # True
1527 # True
1528 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1528 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1529
1529
1530 if usemergedriver:
1530 if usemergedriver:
1531 ms.commit()
1531 ms.commit()
1532 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1532 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1533 # the driver might leave some files unresolved
1533 # the driver might leave some files unresolved
1534 unresolvedf = set(ms.unresolved())
1534 unresolvedf = set(ms.unresolved())
1535 if not proceed:
1535 if not proceed:
1536 # XXX setting unresolved to at least 1 is a hack to make sure we
1536 # XXX setting unresolved to at least 1 is a hack to make sure we
1537 # error out
1537 # error out
1538 return updated, merged, removed, max(len(unresolvedf), 1)
1538 return updated, merged, removed, max(len(unresolvedf), 1)
1539 newactions = []
1539 newactions = []
1540 for f, args, msg in mergeactions:
1540 for f, args, msg in mergeactions:
1541 if f in unresolvedf:
1541 if f in unresolvedf:
1542 newactions.append((f, args, msg))
1542 newactions.append((f, args, msg))
1543 mergeactions = newactions
1543 mergeactions = newactions
1544
1544
1545 try:
1545 try:
1546 # premerge
1546 # premerge
1547 tocomplete = []
1547 tocomplete = []
1548 for f, args, msg in mergeactions:
1548 for f, args, msg in mergeactions:
1549 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1549 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1550 z += 1
1550 z += 1
1551 progress(_updating, z, item=f, total=numupdates, unit=_files)
1551 progress(_updating, z, item=f, total=numupdates, unit=_files)
1552 if f == '.hgsubstate': # subrepo states need updating
1552 if f == '.hgsubstate': # subrepo states need updating
1553 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1553 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1554 overwrite, labels)
1554 overwrite, labels)
1555 continue
1555 continue
1556 wctx[f].audit()
1556 wctx[f].audit()
1557 complete, r = ms.preresolve(f, wctx)
1557 complete, r = ms.preresolve(f, wctx)
1558 if not complete:
1558 if not complete:
1559 numupdates += 1
1559 numupdates += 1
1560 tocomplete.append((f, args, msg))
1560 tocomplete.append((f, args, msg))
1561
1561
1562 # merge
1562 # merge
1563 for f, args, msg in tocomplete:
1563 for f, args, msg in tocomplete:
1564 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1564 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1565 z += 1
1565 z += 1
1566 progress(_updating, z, item=f, total=numupdates, unit=_files)
1566 progress(_updating, z, item=f, total=numupdates, unit=_files)
1567 ms.resolve(f, wctx)
1567 ms.resolve(f, wctx)
1568
1568
1569 finally:
1569 finally:
1570 ms.commit()
1570 ms.commit()
1571
1571
1572 unresolved = ms.unresolvedcount()
1572 unresolved = ms.unresolvedcount()
1573
1573
1574 if usemergedriver and not unresolved and ms.mdstate() != 's':
1574 if usemergedriver and not unresolved and ms.mdstate() != 's':
1575 if not driverconclude(repo, ms, wctx, labels=labels):
1575 if not driverconclude(repo, ms, wctx, labels=labels):
1576 # XXX setting unresolved to at least 1 is a hack to make sure we
1576 # XXX setting unresolved to at least 1 is a hack to make sure we
1577 # error out
1577 # error out
1578 unresolved = max(unresolved, 1)
1578 unresolved = max(unresolved, 1)
1579
1579
1580 ms.commit()
1580 ms.commit()
1581
1581
1582 msupdated, msmerged, msremoved = ms.counts()
1582 msupdated, msmerged, msremoved = ms.counts()
1583 updated += msupdated
1583 updated += msupdated
1584 merged += msmerged
1584 merged += msmerged
1585 removed += msremoved
1585 removed += msremoved
1586
1586
1587 extraactions = ms.actions()
1587 extraactions = ms.actions()
1588 if extraactions:
1588 if extraactions:
1589 mfiles = set(a[0] for a in actions['m'])
1589 mfiles = set(a[0] for a in actions['m'])
1590 for k, acts in extraactions.iteritems():
1590 for k, acts in extraactions.iteritems():
1591 actions[k].extend(acts)
1591 actions[k].extend(acts)
1592 # Remove these files from actions['m'] as well. This is important
1592 # Remove these files from actions['m'] as well. This is important
1593 # because in recordupdates, files in actions['m'] are processed
1593 # because in recordupdates, files in actions['m'] are processed
1594 # after files in other actions, and the merge driver might add
1594 # after files in other actions, and the merge driver might add
1595 # files to those actions via extraactions above. This can lead to a
1595 # files to those actions via extraactions above. This can lead to a
1596 # file being recorded twice, with poor results. This is especially
1596 # file being recorded twice, with poor results. This is especially
1597 # problematic for actions['r'] (currently only possible with the
1597 # problematic for actions['r'] (currently only possible with the
1598 # merge driver in the initial merge process; interrupted merges
1598 # merge driver in the initial merge process; interrupted merges
1599 # don't go through this flow).
1599 # don't go through this flow).
1600 #
1600 #
1601 # The real fix here is to have indexes by both file and action so
1601 # The real fix here is to have indexes by both file and action so
1602 # that when the action for a file is changed it is automatically
1602 # that when the action for a file is changed it is automatically
1603 # reflected in the other action lists. But that involves a more
1603 # reflected in the other action lists. But that involves a more
1604 # complex data structure, so this will do for now.
1604 # complex data structure, so this will do for now.
1605 #
1605 #
1606 # We don't need to do the same operation for 'dc' and 'cd' because
1606 # We don't need to do the same operation for 'dc' and 'cd' because
1607 # those lists aren't consulted again.
1607 # those lists aren't consulted again.
1608 mfiles.difference_update(a[0] for a in acts)
1608 mfiles.difference_update(a[0] for a in acts)
1609
1609
1610 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1610 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1611
1611
1612 progress(_updating, None, total=numupdates, unit=_files)
1612 progress(_updating, None, total=numupdates, unit=_files)
1613
1613
1614 return updated, merged, removed, unresolved
1614 return updated, merged, removed, unresolved
1615
1615
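# The long comment above explains why files that received extra actions from the
# merge state must also be dropped from actions['m'], so recordupdates() does not
# record them twice. A dictionary-level sketch of that bookkeeping;
# `mergeextraactions` is a hypothetical name.
def mergeextraactions(actions, extraactions):
    """Fold extraactions into actions and prune the affected 'm' entries."""
    mfiles = set(a[0] for a in actions.get('m', []))
    for code, acts in extraactions.items():
        actions.setdefault(code, []).extend(acts)
        # any file touched here must not be processed through 'm' again
        mfiles.difference_update(a[0] for a in acts)
    actions['m'] = [a for a in actions.get('m', []) if a[0] in mfiles]

# e.g. with actions = {'m': [('f.txt', (), 'merge')]} and
#      extraactions = {'r': [('f.txt', None, 'merge removed it')]}
# the file ends up only under 'r'.
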
1616 def recordupdates(repo, actions, branchmerge):
1616 def recordupdates(repo, actions, branchmerge):
1617 "record merge actions to the dirstate"
1617 "record merge actions to the dirstate"
1618 # remove (must come first)
1618 # remove (must come first)
1619 for f, args, msg in actions.get('r', []):
1619 for f, args, msg in actions.get('r', []):
1620 if branchmerge:
1620 if branchmerge:
1621 repo.dirstate.remove(f)
1621 repo.dirstate.remove(f)
1622 else:
1622 else:
1623 repo.dirstate.drop(f)
1623 repo.dirstate.drop(f)
1624
1624
1625 # forget (must come first)
1625 # forget (must come first)
1626 for f, args, msg in actions.get('f', []):
1626 for f, args, msg in actions.get('f', []):
1627 repo.dirstate.drop(f)
1627 repo.dirstate.drop(f)
1628
1628
1629 # resolve path conflicts
1629 # resolve path conflicts
1630 for f, args, msg in actions.get('pr', []):
1630 for f, args, msg in actions.get('pr', []):
1631 f0, = args
1631 f0, = args
1632 origf0 = repo.dirstate.copied(f0) or f0
1632 origf0 = repo.dirstate.copied(f0) or f0
1633 repo.dirstate.add(f)
1633 repo.dirstate.add(f)
1634 repo.dirstate.copy(origf0, f)
1634 repo.dirstate.copy(origf0, f)
1635 if f0 == origf0:
1635 if f0 == origf0:
1636 repo.dirstate.remove(f0)
1636 repo.dirstate.remove(f0)
1637 else:
1637 else:
1638 repo.dirstate.drop(f0)
1638 repo.dirstate.drop(f0)
1639
1639
1640 # re-add
1640 # re-add
1641 for f, args, msg in actions.get('a', []):
1641 for f, args, msg in actions.get('a', []):
1642 repo.dirstate.add(f)
1642 repo.dirstate.add(f)
1643
1643
1644 # re-add/mark as modified
1644 # re-add/mark as modified
1645 for f, args, msg in actions.get('am', []):
1645 for f, args, msg in actions.get('am', []):
1646 if branchmerge:
1646 if branchmerge:
1647 repo.dirstate.normallookup(f)
1647 repo.dirstate.normallookup(f)
1648 else:
1648 else:
1649 repo.dirstate.add(f)
1649 repo.dirstate.add(f)
1650
1650
1651 # exec change
1651 # exec change
1652 for f, args, msg in actions.get('e', []):
1652 for f, args, msg in actions.get('e', []):
1653 repo.dirstate.normallookup(f)
1653 repo.dirstate.normallookup(f)
1654
1654
1655 # keep
1655 # keep
1656 for f, args, msg in actions.get('k', []):
1656 for f, args, msg in actions.get('k', []):
1657 pass
1657 pass
1658
1658
1659 # get
1659 # get
1660 for f, args, msg in actions.get('g', []):
1660 for f, args, msg in actions.get('g', []):
1661 if branchmerge:
1661 if branchmerge:
1662 repo.dirstate.otherparent(f)
1662 repo.dirstate.otherparent(f)
1663 else:
1663 else:
1664 repo.dirstate.normal(f)
1664 repo.dirstate.normal(f)
1665
1665
1666 # merge
1666 # merge
1667 for f, args, msg in actions.get('m', []):
1667 for f, args, msg in actions.get('m', []):
1668 f1, f2, fa, move, anc = args
1668 f1, f2, fa, move, anc = args
1669 if branchmerge:
1669 if branchmerge:
1670 # We've done a branch merge, mark this file as merged
1670 # We've done a branch merge, mark this file as merged
1671 # so that we properly record the merger later
1671 # so that we properly record the merger later
1672 repo.dirstate.merge(f)
1672 repo.dirstate.merge(f)
1673 if f1 != f2: # copy/rename
1673 if f1 != f2: # copy/rename
1674 if move:
1674 if move:
1675 repo.dirstate.remove(f1)
1675 repo.dirstate.remove(f1)
1676 if f1 != f:
1676 if f1 != f:
1677 repo.dirstate.copy(f1, f)
1677 repo.dirstate.copy(f1, f)
1678 else:
1678 else:
1679 repo.dirstate.copy(f2, f)
1679 repo.dirstate.copy(f2, f)
1680 else:
1680 else:
1681 # We've update-merged a locally modified file, so
1681 # We've update-merged a locally modified file, so
1682 # we set the dirstate to emulate a normal checkout
1682 # we set the dirstate to emulate a normal checkout
1683 # of that file some time in the past. Thus our
1683 # of that file some time in the past. Thus our
1684 # merge will appear as a normal local file
1684 # merge will appear as a normal local file
1685 # modification.
1685 # modification.
1686 if f2 == f: # file not locally copied/moved
1686 if f2 == f: # file not locally copied/moved
1687 repo.dirstate.normallookup(f)
1687 repo.dirstate.normallookup(f)
1688 if move:
1688 if move:
1689 repo.dirstate.drop(f1)
1689 repo.dirstate.drop(f1)
1690
1690
1691 # directory rename, move local
1691 # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true, we
        should accept the incoming changes for any prompts that occur. If
        false, merging with an ancestor (fast-forward) is only allowed
        between different named branches. This flag is used by the rebase
        extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
        this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |   merge if no conflict
     n   y   n   n    y     *     *     |   discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

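            # a non-linear update with uncommitted changes is only allowed
            # when the target is in the obsolescence foreground of the
            # current parent, i.e. when updating to a successor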
            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # The branching here is a bit convoluted to keep the
                    # number of calls to obsutil.foreground minimal.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
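        # copy tracing is skipped when overwriting, when there is no common
        # ancestor, and on a plain update from a clean working copy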
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

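        # with experimental.updatecheck=noconflict, bail out as soon as any
        # action could touch a locally modified file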
        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

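        # on case-insensitive filesystems, refuse to create files whose names
        # differ only in case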
        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
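        # for a full (non-partial) update, run the preupdate hook and leave a
        # marker file so an interrupted update can be recognized later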
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only advertise on Linux and macOS because that's where fsmonitor
        # is considered stable.
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # and b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

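        # the dirstate is only rewritten for a full update; a matcher-limited
        # (partial) update leaves it alone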
        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (e.g. this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

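    # decide which second parent, if any, to keep in the dirstate; by default
    # the grafted changeset ends up with a single parent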
    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats