merge: use constants for merge record state...
Gregory Szorc
r37129:aa5199c7 default
@@ -1,2140 +1,2149 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .thirdparty import (
25 from .thirdparty import (
26 attr,
26 attr,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 subrepoutil,
36 subrepoutil,
37 util,
37 util,
38 worker,
38 worker,
39 )
39 )
40
40
41 _pack = struct.pack
41 _pack = struct.pack
42 _unpack = struct.unpack
42 _unpack = struct.unpack
43
43
44 def _droponode(data):
44 def _droponode(data):
45 # used for compatibility for v1
45 # used for compatibility for v1
46 bits = data.split('\0')
46 bits = data.split('\0')
47 bits = bits[:-2] + bits[-1:]
47 bits = bits[:-2] + bits[-1:]
48 return '\0'.join(bits)
48 return '\0'.join(bits)
49
49
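# --- Illustrative sketch (not part of merge.py) ----------------------------
# What _droponode() above does to a v2 "F" record payload: the second-to-last
# '\0'-separated field (the "other file node") is dropped so the data matches
# the older v1 layout. Field values here are made up.
v2_payload = '\0'.join(['some/file', 'u', 'localhash', 'local/file',
                        'ancestor/file', 'ancnode', 'other/file',
                        'othernode', 'x'])
v1_payload = '\0'.join(['some/file', 'u', 'localhash', 'local/file',
                        'ancestor/file', 'ancnode', 'other/file', 'x'])
assert _droponode(v2_payload) == v1_payload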
50 # Merge state record types. See ``mergestate`` docs for more.
50 # Merge state record types. See ``mergestate`` docs for more.
51 RECORD_LOCAL = b'L'
51 RECORD_LOCAL = b'L'
52 RECORD_OTHER = b'O'
52 RECORD_OTHER = b'O'
53 RECORD_MERGED = b'F'
53 RECORD_MERGED = b'F'
54 RECORD_CHANGEDELETE_CONFLICT = b'C'
54 RECORD_CHANGEDELETE_CONFLICT = b'C'
55 RECORD_MERGE_DRIVER_MERGE = b'D'
55 RECORD_MERGE_DRIVER_MERGE = b'D'
56 RECORD_PATH_CONFLICT = b'P'
56 RECORD_PATH_CONFLICT = b'P'
57 RECORD_MERGE_DRIVER_STATE = b'm'
57 RECORD_MERGE_DRIVER_STATE = b'm'
58 RECORD_FILE_VALUES = b'f'
58 RECORD_FILE_VALUES = b'f'
59 RECORD_LABELS = b'l'
59 RECORD_LABELS = b'l'
60 RECORD_OVERRIDE = b't'
60 RECORD_OVERRIDE = b't'
61 RECORD_UNSUPPORTED_MANDATORY = b'X'
61 RECORD_UNSUPPORTED_MANDATORY = b'X'
62 RECORD_UNSUPPORTED_ADVISORY = b'x'
62 RECORD_UNSUPPORTED_ADVISORY = b'x'
63
63
64 MERGE_DRIVER_STATE_UNMARKED = b'u'
64 MERGE_DRIVER_STATE_UNMARKED = b'u'
65 MERGE_DRIVER_STATE_MARKED = b'm'
65 MERGE_DRIVER_STATE_MARKED = b'm'
66 MERGE_DRIVER_STATE_SUCCESS = b's'
66 MERGE_DRIVER_STATE_SUCCESS = b's'
67
67
68 MERGE_RECORD_UNRESOLVED = b'u'
69 MERGE_RECORD_RESOLVED = b'r'
70 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
71 MERGE_RECORD_RESOLVED_PATH = b'pr'
72 MERGE_RECORD_DRIVER_RESOLVED = b'd'
73
68 class mergestate(object):
74 class mergestate(object):
69 '''track 3-way merge state of individual files
75 '''track 3-way merge state of individual files
70
76
71 The merge state is stored on disk when needed. Two files are used: one with
77 The merge state is stored on disk when needed. Two files are used: one with
72 an old format (version 1), and one with a new format (version 2). Version 2
78 an old format (version 1), and one with a new format (version 2). Version 2
73 stores a superset of the data in version 1, including new kinds of records
79 stores a superset of the data in version 1, including new kinds of records
74 in the future. For more about the new format, see the documentation for
80 in the future. For more about the new format, see the documentation for
75 `_readrecordsv2`.
81 `_readrecordsv2`.
76
82
77 Each record can contain arbitrary content, and has an associated type. This
83 Each record can contain arbitrary content, and has an associated type. This
78 `type` should be a letter. If `type` is uppercase, the record is mandatory:
84 `type` should be a letter. If `type` is uppercase, the record is mandatory:
79 versions of Mercurial that don't support it should abort. If `type` is
85 versions of Mercurial that don't support it should abort. If `type` is
80 lowercase, the record can be safely ignored.
86 lowercase, the record can be safely ignored.
81
87
82 Currently known records:
88 Currently known records:
83
89
84 L: the node of the "local" part of the merge (hexified version)
90 L: the node of the "local" part of the merge (hexified version)
85 O: the node of the "other" part of the merge (hexified version)
91 O: the node of the "other" part of the merge (hexified version)
86 F: a file-to-be-merged entry
92 F: a file-to-be-merged entry
87 C: a change/delete or delete/change conflict
93 C: a change/delete or delete/change conflict
88 D: a file that the external merge driver will merge internally
94 D: a file that the external merge driver will merge internally
89 (experimental)
95 (experimental)
90 P: a path conflict (file vs directory)
96 P: a path conflict (file vs directory)
91 m: the external merge driver defined for this merge plus its run state
97 m: the external merge driver defined for this merge plus its run state
92 (experimental)
98 (experimental)
93 f: a (filename, dictionary) tuple of optional values for a given file
99 f: a (filename, dictionary) tuple of optional values for a given file
94 X: unsupported mandatory record type (used in tests)
100 X: unsupported mandatory record type (used in tests)
95 x: unsupported advisory record type (used in tests)
101 x: unsupported advisory record type (used in tests)
96 l: the labels for the parts of the merge.
102 l: the labels for the parts of the merge.
97
103
98 Merge driver run states (experimental):
104 Merge driver run states (experimental):
99 u: driver-resolved files unmarked -- needs to be run next time we're about
105 u: driver-resolved files unmarked -- needs to be run next time we're about
100 to resolve or commit
106 to resolve or commit
101 m: driver-resolved files marked -- only needs to be run before commit
107 m: driver-resolved files marked -- only needs to be run before commit
102 s: success/skipped -- does not need to be run any more
108 s: success/skipped -- does not need to be run any more
103
109
104 Merge record states (stored in self._state, indexed by filename):
110 Merge record states (stored in self._state, indexed by filename):
105 u: unresolved conflict
111 u: unresolved conflict
106 r: resolved conflict
112 r: resolved conflict
107 pu: unresolved path conflict (file conflicts with directory)
113 pu: unresolved path conflict (file conflicts with directory)
108 pr: resolved path conflict
114 pr: resolved path conflict
109 d: driver-resolved conflict
115 d: driver-resolved conflict
110
116
111 The resolve command transitions between 'u' and 'r' for conflicts and
117 The resolve command transitions between 'u' and 'r' for conflicts and
112 'pu' and 'pr' for path conflicts.
118 'pu' and 'pr' for path conflicts.
113 '''
119 '''
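# --- Illustrative sketch (not part of merge.py) ----------------------------
# The case of a record type tells readers how to treat records they do not
# understand: uppercase types are mandatory (readers must abort), lowercase
# ones are advisory (safe to skip). A minimal classifier following that rule,
# using the constants defined above; classify_record and known are invented
# names.
def classify_record(rtype, known):
    if rtype in known:
        return 'supported'
    if rtype.islower():
        return 'advisory - may be ignored'
    return 'mandatory - reader must abort'

known = {RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED}
assert classify_record(RECORD_MERGED, known) == 'supported'
assert classify_record(RECORD_UNSUPPORTED_MANDATORY, known).startswith('mandatory')
assert classify_record(RECORD_UNSUPPORTED_ADVISORY, known).startswith('advisory')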
114 statepathv1 = 'merge/state'
120 statepathv1 = 'merge/state'
115 statepathv2 = 'merge/state2'
121 statepathv2 = 'merge/state2'
116
122
117 @staticmethod
123 @staticmethod
118 def clean(repo, node=None, other=None, labels=None):
124 def clean(repo, node=None, other=None, labels=None):
119 """Initialize a brand new merge state, removing any existing state on
125 """Initialize a brand new merge state, removing any existing state on
120 disk."""
126 disk."""
121 ms = mergestate(repo)
127 ms = mergestate(repo)
122 ms.reset(node, other, labels)
128 ms.reset(node, other, labels)
123 return ms
129 return ms
124
130
125 @staticmethod
131 @staticmethod
126 def read(repo):
132 def read(repo):
127 """Initialize the merge state, reading it from disk."""
133 """Initialize the merge state, reading it from disk."""
128 ms = mergestate(repo)
134 ms = mergestate(repo)
129 ms._read()
135 ms._read()
130 return ms
136 return ms
131
137
132 def __init__(self, repo):
138 def __init__(self, repo):
133 """Initialize the merge state.
139 """Initialize the merge state.
134
140
135 Do not use this directly! Instead call read() or clean()."""
141 Do not use this directly! Instead call read() or clean()."""
136 self._repo = repo
142 self._repo = repo
137 self._dirty = False
143 self._dirty = False
138 self._labels = None
144 self._labels = None
139
145
140 def reset(self, node=None, other=None, labels=None):
146 def reset(self, node=None, other=None, labels=None):
141 self._state = {}
147 self._state = {}
142 self._stateextras = {}
148 self._stateextras = {}
143 self._local = None
149 self._local = None
144 self._other = None
150 self._other = None
145 self._labels = labels
151 self._labels = labels
146 for var in ('localctx', 'otherctx'):
152 for var in ('localctx', 'otherctx'):
147 if var in vars(self):
153 if var in vars(self):
148 delattr(self, var)
154 delattr(self, var)
149 if node:
155 if node:
150 self._local = node
156 self._local = node
151 self._other = other
157 self._other = other
152 self._readmergedriver = None
158 self._readmergedriver = None
153 if self.mergedriver:
159 if self.mergedriver:
154 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
160 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
155 else:
161 else:
156 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
162 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
157 shutil.rmtree(self._repo.vfs.join('merge'), True)
163 shutil.rmtree(self._repo.vfs.join('merge'), True)
158 self._results = {}
164 self._results = {}
159 self._dirty = False
165 self._dirty = False
160
166
161 def _read(self):
167 def _read(self):
162 """Analyse each record content to restore a serialized state from disk
168 """Analyse each record content to restore a serialized state from disk
163
169
164 This function processes the "record" entries produced by the de-serialization
170 This function processes the "record" entries produced by the de-serialization
165 of the on-disk file.
171 of the on-disk file.
166 """
172 """
167 self._state = {}
173 self._state = {}
168 self._stateextras = {}
174 self._stateextras = {}
169 self._local = None
175 self._local = None
170 self._other = None
176 self._other = None
171 for var in ('localctx', 'otherctx'):
177 for var in ('localctx', 'otherctx'):
172 if var in vars(self):
178 if var in vars(self):
173 delattr(self, var)
179 delattr(self, var)
174 self._readmergedriver = None
180 self._readmergedriver = None
175 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
181 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
176 unsupported = set()
182 unsupported = set()
177 records = self._readrecords()
183 records = self._readrecords()
178 for rtype, record in records:
184 for rtype, record in records:
179 if rtype == RECORD_LOCAL:
185 if rtype == RECORD_LOCAL:
180 self._local = bin(record)
186 self._local = bin(record)
181 elif rtype == RECORD_OTHER:
187 elif rtype == RECORD_OTHER:
182 self._other = bin(record)
188 self._other = bin(record)
183 elif rtype == RECORD_MERGE_DRIVER_STATE:
189 elif rtype == RECORD_MERGE_DRIVER_STATE:
184 bits = record.split('\0', 1)
190 bits = record.split('\0', 1)
185 mdstate = bits[1]
191 mdstate = bits[1]
186 if len(mdstate) != 1 or mdstate not in (
192 if len(mdstate) != 1 or mdstate not in (
187 MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
193 MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
188 MERGE_DRIVER_STATE_SUCCESS):
194 MERGE_DRIVER_STATE_SUCCESS):
189 # the merge driver should be idempotent, so just rerun it
195 # the merge driver should be idempotent, so just rerun it
190 mdstate = MERGE_DRIVER_STATE_UNMARKED
196 mdstate = MERGE_DRIVER_STATE_UNMARKED
191
197
192 self._readmergedriver = bits[0]
198 self._readmergedriver = bits[0]
193 self._mdstate = mdstate
199 self._mdstate = mdstate
194 elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
200 elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
195 RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
201 RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
196 bits = record.split('\0')
202 bits = record.split('\0')
197 self._state[bits[0]] = bits[1:]
203 self._state[bits[0]] = bits[1:]
198 elif rtype == RECORD_FILE_VALUES:
204 elif rtype == RECORD_FILE_VALUES:
199 filename, rawextras = record.split('\0', 1)
205 filename, rawextras = record.split('\0', 1)
200 extraparts = rawextras.split('\0')
206 extraparts = rawextras.split('\0')
201 extras = {}
207 extras = {}
202 i = 0
208 i = 0
203 while i < len(extraparts):
209 while i < len(extraparts):
204 extras[extraparts[i]] = extraparts[i + 1]
210 extras[extraparts[i]] = extraparts[i + 1]
205 i += 2
211 i += 2
206
212
207 self._stateextras[filename] = extras
213 self._stateextras[filename] = extras
208 elif rtype == RECORD_LABELS:
214 elif rtype == RECORD_LABELS:
209 labels = record.split('\0', 2)
215 labels = record.split('\0', 2)
210 self._labels = [l for l in labels if len(l) > 0]
216 self._labels = [l for l in labels if len(l) > 0]
211 elif not rtype.islower():
217 elif not rtype.islower():
212 unsupported.add(rtype)
218 unsupported.add(rtype)
213 self._results = {}
219 self._results = {}
214 self._dirty = False
220 self._dirty = False
215
221
216 if unsupported:
222 if unsupported:
217 raise error.UnsupportedMergeRecords(unsupported)
223 raise error.UnsupportedMergeRecords(unsupported)
218
224
219 def _readrecords(self):
225 def _readrecords(self):
220 """Read merge state from disk and return a list of records (TYPE, data)
226 """Read merge state from disk and return a list of records (TYPE, data)
221
227
222 We read data from both v1 and v2 files and decide which one to use.
228 We read data from both v1 and v2 files and decide which one to use.
223
229
224 V1 has been used by versions prior to 2.9.1 and contains less data than
230 V1 has been used by versions prior to 2.9.1 and contains less data than
225 v2. We read both versions and check if no data in v2 contradicts
231 v2. We read both versions and check if no data in v2 contradicts
226 v1. If there is no contradiction we can safely assume that both v1
232 v1. If there is no contradiction we can safely assume that both v1
227 and v2 were written at the same time and use the extra data in v2. If
233 and v2 were written at the same time and use the extra data in v2. If
228 there is a contradiction we ignore v2 content as we assume an old version
234 there is a contradiction we ignore v2 content as we assume an old version
229 of Mercurial has overwritten the mergestate file and left an old v2
235 of Mercurial has overwritten the mergestate file and left an old v2
230 file around.
236 file around.
231
237
232 returns a list of records [(TYPE, data), ...]"""
238 returns a list of records [(TYPE, data), ...]"""
233 v1records = self._readrecordsv1()
239 v1records = self._readrecordsv1()
234 v2records = self._readrecordsv2()
240 v2records = self._readrecordsv2()
235 if self._v1v2match(v1records, v2records):
241 if self._v1v2match(v1records, v2records):
236 return v2records
242 return v2records
237 else:
243 else:
238 # v1 file is newer than v2 file, use it
244 # v1 file is newer than v2 file, use it
239 # we have to infer the "other" changeset of the merge
245 # we have to infer the "other" changeset of the merge
240 # we cannot do better than that with v1 of the format
246 # we cannot do better than that with v1 of the format
241 mctx = self._repo[None].parents()[-1]
247 mctx = self._repo[None].parents()[-1]
242 v1records.append((RECORD_OTHER, mctx.hex()))
248 v1records.append((RECORD_OTHER, mctx.hex()))
243 # add placeholder "other" file node information
249 # add placeholder "other" file node information
244 # nobody is using it yet so we do not need to fetch the data
250 # nobody is using it yet so we do not need to fetch the data
245 # if mctx was wrong `mctx[bits[-2]]` may fail.
251 # if mctx was wrong `mctx[bits[-2]]` may fail.
246 for idx, r in enumerate(v1records):
252 for idx, r in enumerate(v1records):
247 if r[0] == RECORD_MERGED:
253 if r[0] == RECORD_MERGED:
248 bits = r[1].split('\0')
254 bits = r[1].split('\0')
249 bits.insert(-2, '')
255 bits.insert(-2, '')
250 v1records[idx] = (r[0], '\0'.join(bits))
256 v1records[idx] = (r[0], '\0'.join(bits))
251 return v1records
257 return v1records
252
258
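# --- Illustrative sketch (not part of merge.py) ----------------------------
# When only the v1 file is trusted, each "F" record is one field short, so an
# empty placeholder is spliced in via insert(-2, '') to give it the same
# number of fields as a v2 record (see the comments above). Made-up values:
bits = ['some/file', 'u', 'localhash', 'local/file', 'ancestor/file',
        'ancnode', 'other/file', 'x']
bits.insert(-2, '')
assert bits == ['some/file', 'u', 'localhash', 'local/file', 'ancestor/file',
                'ancnode', '', 'other/file', 'x']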
253 def _v1v2match(self, v1records, v2records):
259 def _v1v2match(self, v1records, v2records):
254 oldv2 = set() # old format version of v2 record
260 oldv2 = set() # old format version of v2 record
255 for rec in v2records:
261 for rec in v2records:
256 if rec[0] == RECORD_LOCAL:
262 if rec[0] == RECORD_LOCAL:
257 oldv2.add(rec)
263 oldv2.add(rec)
258 elif rec[0] == RECORD_MERGED:
264 elif rec[0] == RECORD_MERGED:
259 # drop the onode data (not contained in v1)
265 # drop the onode data (not contained in v1)
260 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
266 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
261 for rec in v1records:
267 for rec in v1records:
262 if rec not in oldv2:
268 if rec not in oldv2:
263 return False
269 return False
264 else:
270 else:
265 return True
271 return True
266
272
267 def _readrecordsv1(self):
273 def _readrecordsv1(self):
268 """read the on-disk merge state for the version 1 file
274 """read the on-disk merge state for the version 1 file
269
275
270 returns list of record [(TYPE, data), ...]
276 returns list of record [(TYPE, data), ...]
271
277
272 Note: the "F" data from this file are one entry short
278 Note: the "F" data from this file are one entry short
273 (no "other file node" entry)
279 (no "other file node" entry)
274 """
280 """
275 records = []
281 records = []
276 try:
282 try:
277 f = self._repo.vfs(self.statepathv1)
283 f = self._repo.vfs(self.statepathv1)
278 for i, l in enumerate(f):
284 for i, l in enumerate(f):
279 if i == 0:
285 if i == 0:
280 records.append((RECORD_LOCAL, l[:-1]))
286 records.append((RECORD_LOCAL, l[:-1]))
281 else:
287 else:
282 records.append((RECORD_MERGED, l[:-1]))
288 records.append((RECORD_MERGED, l[:-1]))
283 f.close()
289 f.close()
284 except IOError as err:
290 except IOError as err:
285 if err.errno != errno.ENOENT:
291 if err.errno != errno.ENOENT:
286 raise
292 raise
287 return records
293 return records
288
294
289 """read the on-disk merge state for the version 2 file
295 """read the on-disk merge state for the version 2 file
290 """read on disk merge state for version 2 file
296 """read on disk merge state for version 2 file
291
297
292 This format is a list of arbitrary records of the form:
298 This format is a list of arbitrary records of the form:
293
299
294 [type][length][content]
300 [type][length][content]
295
301
296 `type` is a single character, `length` is a 4 byte integer, and
302 `type` is a single character, `length` is a 4 byte integer, and
297 `content` is an arbitrary byte sequence of length `length`.
303 `content` is an arbitrary byte sequence of length `length`.
298
304
299 Mercurial versions prior to 3.7 have a bug where if there are
305 Mercurial versions prior to 3.7 have a bug where if there are
300 unsupported mandatory merge records, attempting to clear out the merge
306 unsupported mandatory merge records, attempting to clear out the merge
301 state with hg update --clean or similar aborts. The 't' record type
307 state with hg update --clean or similar aborts. The 't' record type
302 works around that by writing out what those versions treat as an
308 works around that by writing out what those versions treat as an
303 advisory record, but later versions interpret as special: the first
309 advisory record, but later versions interpret as special: the first
304 character is the 'real' record type and everything onwards is the data.
310 character is the 'real' record type and everything onwards is the data.
305
311
306 Returns list of records [(TYPE, data), ...]."""
312 Returns list of records [(TYPE, data), ...]."""
307 records = []
313 records = []
308 try:
314 try:
309 f = self._repo.vfs(self.statepathv2)
315 f = self._repo.vfs(self.statepathv2)
310 data = f.read()
316 data = f.read()
311 off = 0
317 off = 0
312 end = len(data)
318 end = len(data)
313 while off < end:
319 while off < end:
314 rtype = data[off:off + 1]
320 rtype = data[off:off + 1]
315 off += 1
321 off += 1
316 length = _unpack('>I', data[off:(off + 4)])[0]
322 length = _unpack('>I', data[off:(off + 4)])[0]
317 off += 4
323 off += 4
318 record = data[off:(off + length)]
324 record = data[off:(off + length)]
319 off += length
325 off += length
320 if rtype == RECORD_OVERRIDE:
326 if rtype == RECORD_OVERRIDE:
321 rtype, record = record[0:1], record[1:]
327 rtype, record = record[0:1], record[1:]
322 records.append((rtype, record))
328 records.append((rtype, record))
323 f.close()
329 f.close()
324 except IOError as err:
330 except IOError as err:
325 if err.errno != errno.ENOENT:
331 if err.errno != errno.ENOENT:
326 raise
332 raise
327 return records
333 return records
328
334
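# --- Illustrative sketch (not part of merge.py) ----------------------------
# The v2 framing described above: a 1-byte type, a big-endian 4-byte length,
# then the payload. Records outside the v2 allowlist are wrapped in a 't'
# (RECORD_OVERRIDE) record whose payload starts with the real type. `frame`
# is an invented helper name.
import struct

def frame(rtype, data):
    return struct.pack('>sI%ds' % len(data), rtype, len(data), data)

rec = frame(b'l', b'working copy\x00merge rev')      # an advisory labels record
assert rec[0:1] == b'l'
assert struct.unpack('>I', rec[1:5])[0] == 22
assert rec[5:] == b'working copy\x00merge rev'

wrapped = frame(b't', b'X' + b'mandatory payload')   # override-wrapped record
assert wrapped[5:6] == b'X'                          # real type leads the data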
329 @util.propertycache
335 @util.propertycache
330 def mergedriver(self):
336 def mergedriver(self):
331 # protect against the following:
337 # protect against the following:
332 # - A configures a malicious merge driver in their hgrc, then
338 # - A configures a malicious merge driver in their hgrc, then
333 # pauses the merge
339 # pauses the merge
334 # - A edits their hgrc to remove references to the merge driver
340 # - A edits their hgrc to remove references to the merge driver
335 # - A gives a copy of their entire repo, including .hg, to B
341 # - A gives a copy of their entire repo, including .hg, to B
336 # - B inspects .hgrc and finds it to be clean
342 # - B inspects .hgrc and finds it to be clean
337 # - B then continues the merge and the malicious merge driver
343 # - B then continues the merge and the malicious merge driver
338 # gets invoked
344 # gets invoked
339 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
345 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
340 if (self._readmergedriver is not None
346 if (self._readmergedriver is not None
341 and self._readmergedriver != configmergedriver):
347 and self._readmergedriver != configmergedriver):
342 raise error.ConfigError(
348 raise error.ConfigError(
343 _("merge driver changed since merge started"),
349 _("merge driver changed since merge started"),
344 hint=_("revert merge driver change or abort merge"))
350 hint=_("revert merge driver change or abort merge"))
345
351
346 return configmergedriver
352 return configmergedriver
347
353
348 @util.propertycache
354 @util.propertycache
349 def localctx(self):
355 def localctx(self):
350 if self._local is None:
356 if self._local is None:
351 msg = "localctx accessed but self._local isn't set"
357 msg = "localctx accessed but self._local isn't set"
352 raise error.ProgrammingError(msg)
358 raise error.ProgrammingError(msg)
353 return self._repo[self._local]
359 return self._repo[self._local]
354
360
355 @util.propertycache
361 @util.propertycache
356 def otherctx(self):
362 def otherctx(self):
357 if self._other is None:
363 if self._other is None:
358 msg = "otherctx accessed but self._other isn't set"
364 msg = "otherctx accessed but self._other isn't set"
359 raise error.ProgrammingError(msg)
365 raise error.ProgrammingError(msg)
360 return self._repo[self._other]
366 return self._repo[self._other]
361
367
362 def active(self):
368 def active(self):
363 """Whether mergestate is active.
369 """Whether mergestate is active.
364
370
365 Returns True if there appears to be mergestate. This is a rough proxy
371 Returns True if there appears to be mergestate. This is a rough proxy
366 for "is a merge in progress."
372 for "is a merge in progress."
367 """
373 """
368 # Check local variables before looking at filesystem for performance
374 # Check local variables before looking at filesystem for performance
369 # reasons.
375 # reasons.
370 return bool(self._local) or bool(self._state) or \
376 return bool(self._local) or bool(self._state) or \
371 self._repo.vfs.exists(self.statepathv1) or \
377 self._repo.vfs.exists(self.statepathv1) or \
372 self._repo.vfs.exists(self.statepathv2)
378 self._repo.vfs.exists(self.statepathv2)
373
379
374 def commit(self):
380 def commit(self):
375 """Write current state on disk (if necessary)"""
381 """Write current state on disk (if necessary)"""
376 if self._dirty:
382 if self._dirty:
377 records = self._makerecords()
383 records = self._makerecords()
378 self._writerecords(records)
384 self._writerecords(records)
379 self._dirty = False
385 self._dirty = False
380
386
381 def _makerecords(self):
387 def _makerecords(self):
382 records = []
388 records = []
383 records.append((RECORD_LOCAL, hex(self._local)))
389 records.append((RECORD_LOCAL, hex(self._local)))
384 records.append((RECORD_OTHER, hex(self._other)))
390 records.append((RECORD_OTHER, hex(self._other)))
385 if self.mergedriver:
391 if self.mergedriver:
386 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
392 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
387 self.mergedriver, self._mdstate])))
393 self.mergedriver, self._mdstate])))
388 # Write out state items. In all cases, the value of the state map entry
394 # Write out state items. In all cases, the value of the state map entry
389 # is written as the contents of the record. The record type depends on
395 # is written as the contents of the record. The record type depends on
390 # the type of state that is stored, and capital-letter records are used
396 # the type of state that is stored, and capital-letter records are used
391 # to prevent older versions of Mercurial that do not support the feature
397 # to prevent older versions of Mercurial that do not support the feature
392 # from loading them.
398 # from loading them.
393 for filename, v in self._state.iteritems():
399 for filename, v in self._state.iteritems():
394 if v[0] == 'd':
400 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
395 # Driver-resolved merge. These are stored in 'D' records.
401 # Driver-resolved merge. These are stored in 'D' records.
396 records.append((RECORD_MERGE_DRIVER_MERGE,
402 records.append((RECORD_MERGE_DRIVER_MERGE,
397 '\0'.join([filename] + v)))
403 '\0'.join([filename] + v)))
398 elif v[0] in ('pu', 'pr'):
404 elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
405 MERGE_RECORD_RESOLVED_PATH):
399 # Path conflicts. These are stored in 'P' records. The current
406 # Path conflicts. These are stored in 'P' records. The current
400 # resolution state ('pu' or 'pr') is stored within the record.
407 # resolution state ('pu' or 'pr') is stored within the record.
401 records.append((RECORD_PATH_CONFLICT,
408 records.append((RECORD_PATH_CONFLICT,
402 '\0'.join([filename] + v)))
409 '\0'.join([filename] + v)))
403 elif v[1] == nullhex or v[6] == nullhex:
410 elif v[1] == nullhex or v[6] == nullhex:
404 # Change/Delete or Delete/Change conflicts. These are stored in
411 # Change/Delete or Delete/Change conflicts. These are stored in
405 # 'C' records. v[1] is the local file, and is nullhex when the
412 # 'C' records. v[1] is the local file, and is nullhex when the
406 # file is deleted locally ('dc'). v[6] is the remote file, and
413 # file is deleted locally ('dc'). v[6] is the remote file, and
407 # is nullhex when the file is deleted remotely ('cd').
414 # is nullhex when the file is deleted remotely ('cd').
408 records.append((RECORD_CHANGEDELETE_CONFLICT,
415 records.append((RECORD_CHANGEDELETE_CONFLICT,
409 '\0'.join([filename] + v)))
416 '\0'.join([filename] + v)))
410 else:
417 else:
411 # Normal files. These are stored in 'F' records.
418 # Normal files. These are stored in 'F' records.
412 records.append((RECORD_MERGED,
419 records.append((RECORD_MERGED,
413 '\0'.join([filename] + v)))
420 '\0'.join([filename] + v)))
414 for filename, extras in sorted(self._stateextras.iteritems()):
421 for filename, extras in sorted(self._stateextras.iteritems()):
415 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
422 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
416 extras.iteritems())
423 extras.iteritems())
417 records.append((RECORD_FILE_VALUES,
424 records.append((RECORD_FILE_VALUES,
418 '%s\0%s' % (filename, rawextras)))
425 '%s\0%s' % (filename, rawextras)))
419 if self._labels is not None:
426 if self._labels is not None:
420 labels = '\0'.join(self._labels)
427 labels = '\0'.join(self._labels)
421 records.append((RECORD_LABELS, labels))
428 records.append((RECORD_LABELS, labels))
422 return records
429 return records
423
430
424 def _writerecords(self, records):
431 def _writerecords(self, records):
425 """Write current state on disk (both v1 and v2)"""
432 """Write current state on disk (both v1 and v2)"""
426 self._writerecordsv1(records)
433 self._writerecordsv1(records)
427 self._writerecordsv2(records)
434 self._writerecordsv2(records)
428
435
429 def _writerecordsv1(self, records):
436 def _writerecordsv1(self, records):
430 """Write current state on disk in a version 1 file"""
437 """Write current state on disk in a version 1 file"""
431 f = self._repo.vfs(self.statepathv1, 'wb')
438 f = self._repo.vfs(self.statepathv1, 'wb')
432 irecords = iter(records)
439 irecords = iter(records)
433 lrecords = next(irecords)
440 lrecords = next(irecords)
434 assert lrecords[0] == RECORD_LOCAL
441 assert lrecords[0] == RECORD_LOCAL
435 f.write(hex(self._local) + '\n')
442 f.write(hex(self._local) + '\n')
436 for rtype, data in irecords:
443 for rtype, data in irecords:
437 if rtype == RECORD_MERGED:
444 if rtype == RECORD_MERGED:
438 f.write('%s\n' % _droponode(data))
445 f.write('%s\n' % _droponode(data))
439 f.close()
446 f.close()
440
447
441 def _writerecordsv2(self, records):
448 def _writerecordsv2(self, records):
442 """Write current state on disk in a version 2 file
449 """Write current state on disk in a version 2 file
443
450
444 See the docstring for _readrecordsv2 for why we use 't'."""
451 See the docstring for _readrecordsv2 for why we use 't'."""
445 # these are the records that all version 2 clients can read
452 # these are the records that all version 2 clients can read
446 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
453 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
447 f = self._repo.vfs(self.statepathv2, 'wb')
454 f = self._repo.vfs(self.statepathv2, 'wb')
448 for key, data in records:
455 for key, data in records:
449 assert len(key) == 1
456 assert len(key) == 1
450 if key not in allowlist:
457 if key not in allowlist:
451 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
458 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
452 format = '>sI%is' % len(data)
459 format = '>sI%is' % len(data)
453 f.write(_pack(format, key, len(data), data))
460 f.write(_pack(format, key, len(data), data))
454 f.close()
461 f.close()
455
462
456 def add(self, fcl, fco, fca, fd):
463 def add(self, fcl, fco, fca, fd):
457 """add a new (potentially?) conflicting file to the merge state
464 """add a new (potentially?) conflicting file to the merge state
458 fcl: file context for local,
465 fcl: file context for local,
459 fco: file context for remote,
466 fco: file context for remote,
460 fca: file context for ancestors,
467 fca: file context for ancestors,
461 fd: file path of the resulting merge.
468 fd: file path of the resulting merge.
462
469
463 note: also write the local version to the `.hg/merge` directory.
470 note: also write the local version to the `.hg/merge` directory.
464 """
471 """
465 if fcl.isabsent():
472 if fcl.isabsent():
466 hash = nullhex
473 hash = nullhex
467 else:
474 else:
468 hash = hex(hashlib.sha1(fcl.path()).digest())
475 hash = hex(hashlib.sha1(fcl.path()).digest())
469 self._repo.vfs.write('merge/' + hash, fcl.data())
476 self._repo.vfs.write('merge/' + hash, fcl.data())
470 self._state[fd] = ['u', hash, fcl.path(),
477 self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
471 fca.path(), hex(fca.filenode()),
478 fca.path(), hex(fca.filenode()),
472 fco.path(), hex(fco.filenode()),
479 fco.path(), hex(fco.filenode()),
473 fcl.flags()]
480 fcl.flags()]
474 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
481 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
475 self._dirty = True
482 self._dirty = True
476
483
477 def addpath(self, path, frename, forigin):
484 def addpath(self, path, frename, forigin):
478 """add a new conflicting path to the merge state
485 """add a new conflicting path to the merge state
479 path: the path that conflicts
486 path: the path that conflicts
480 frename: the filename the conflicting file was renamed to
487 frename: the filename the conflicting file was renamed to
481 forigin: origin of the file ('l' or 'r' for local/remote)
488 forigin: origin of the file ('l' or 'r' for local/remote)
482 """
489 """
483 self._state[path] = ['pu', frename, forigin]
490 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
484 self._dirty = True
491 self._dirty = True
485
492
486 def __contains__(self, dfile):
493 def __contains__(self, dfile):
487 return dfile in self._state
494 return dfile in self._state
488
495
489 def __getitem__(self, dfile):
496 def __getitem__(self, dfile):
490 return self._state[dfile][0]
497 return self._state[dfile][0]
491
498
492 def __iter__(self):
499 def __iter__(self):
493 return iter(sorted(self._state))
500 return iter(sorted(self._state))
494
501
495 def files(self):
502 def files(self):
496 return self._state.keys()
503 return self._state.keys()
497
504
498 def mark(self, dfile, state):
505 def mark(self, dfile, state):
499 self._state[dfile][0] = state
506 self._state[dfile][0] = state
500 self._dirty = True
507 self._dirty = True
501
508
502 def mdstate(self):
509 def mdstate(self):
503 return self._mdstate
510 return self._mdstate
504
511
505 def unresolved(self):
512 def unresolved(self):
506 """Obtain the paths of unresolved files."""
513 """Obtain the paths of unresolved files."""
507
514
508 for f, entry in self._state.iteritems():
515 for f, entry in self._state.iteritems():
509 if entry[0] in ('u', 'pu'):
516 if entry[0] in (MERGE_RECORD_UNRESOLVED,
517 MERGE_RECORD_UNRESOLVED_PATH):
510 yield f
518 yield f
511
519
512 def driverresolved(self):
520 def driverresolved(self):
513 """Obtain the paths of driver-resolved files."""
521 """Obtain the paths of driver-resolved files."""
514
522
515 for f, entry in self._state.items():
523 for f, entry in self._state.items():
516 if entry[0] == 'd':
524 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
517 yield f
525 yield f
518
526
519 def extras(self, filename):
527 def extras(self, filename):
520 return self._stateextras.setdefault(filename, {})
528 return self._stateextras.setdefault(filename, {})
521
529
522 def _resolve(self, preresolve, dfile, wctx):
530 def _resolve(self, preresolve, dfile, wctx):
523 """rerun merge process for file path `dfile`"""
531 """rerun merge process for file path `dfile`"""
524 if self[dfile] in 'rd':
532 if self[dfile] in (MERGE_RECORD_RESOLVED,
533 MERGE_RECORD_DRIVER_RESOLVED):
525 return True, 0
534 return True, 0
526 stateentry = self._state[dfile]
535 stateentry = self._state[dfile]
527 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
536 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
528 octx = self._repo[self._other]
537 octx = self._repo[self._other]
529 extras = self.extras(dfile)
538 extras = self.extras(dfile)
530 anccommitnode = extras.get('ancestorlinknode')
539 anccommitnode = extras.get('ancestorlinknode')
531 if anccommitnode:
540 if anccommitnode:
532 actx = self._repo[anccommitnode]
541 actx = self._repo[anccommitnode]
533 else:
542 else:
534 actx = None
543 actx = None
535 fcd = self._filectxorabsent(hash, wctx, dfile)
544 fcd = self._filectxorabsent(hash, wctx, dfile)
536 fco = self._filectxorabsent(onode, octx, ofile)
545 fco = self._filectxorabsent(onode, octx, ofile)
537 # TODO: move this to filectxorabsent
546 # TODO: move this to filectxorabsent
538 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
547 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
539 # "premerge" x flags
548 # "premerge" x flags
540 flo = fco.flags()
549 flo = fco.flags()
541 fla = fca.flags()
550 fla = fca.flags()
542 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
551 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
543 if fca.node() == nullid and flags != flo:
552 if fca.node() == nullid and flags != flo:
544 if preresolve:
553 if preresolve:
545 self._repo.ui.warn(
554 self._repo.ui.warn(
546 _('warning: cannot merge flags for %s '
555 _('warning: cannot merge flags for %s '
547 'without common ancestor - keeping local flags\n')
556 'without common ancestor - keeping local flags\n')
548 % afile)
557 % afile)
549 elif flags == fla:
558 elif flags == fla:
550 flags = flo
559 flags = flo
551 if preresolve:
560 if preresolve:
552 # restore local
561 # restore local
553 if hash != nullhex:
562 if hash != nullhex:
554 f = self._repo.vfs('merge/' + hash)
563 f = self._repo.vfs('merge/' + hash)
555 wctx[dfile].write(f.read(), flags)
564 wctx[dfile].write(f.read(), flags)
556 f.close()
565 f.close()
557 else:
566 else:
558 wctx[dfile].remove(ignoremissing=True)
567 wctx[dfile].remove(ignoremissing=True)
559 complete, r, deleted = filemerge.premerge(self._repo, wctx,
568 complete, r, deleted = filemerge.premerge(self._repo, wctx,
560 self._local, lfile, fcd,
569 self._local, lfile, fcd,
561 fco, fca,
570 fco, fca,
562 labels=self._labels)
571 labels=self._labels)
563 else:
572 else:
564 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
573 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
565 self._local, lfile, fcd,
574 self._local, lfile, fcd,
566 fco, fca,
575 fco, fca,
567 labels=self._labels)
576 labels=self._labels)
568 if r is None:
577 if r is None:
569 # no real conflict
578 # no real conflict
570 del self._state[dfile]
579 del self._state[dfile]
571 self._stateextras.pop(dfile, None)
580 self._stateextras.pop(dfile, None)
572 self._dirty = True
581 self._dirty = True
573 elif not r:
582 elif not r:
574 self.mark(dfile, 'r')
583 self.mark(dfile, MERGE_RECORD_RESOLVED)
575
584
576 if complete:
585 if complete:
577 action = None
586 action = None
578 if deleted:
587 if deleted:
579 if fcd.isabsent():
588 if fcd.isabsent():
580 # dc: local picked. Need to drop if present, which may
589 # dc: local picked. Need to drop if present, which may
581 # happen on re-resolves.
590 # happen on re-resolves.
582 action = 'f'
591 action = 'f'
583 else:
592 else:
584 # cd: remote picked (or otherwise deleted)
593 # cd: remote picked (or otherwise deleted)
585 action = 'r'
594 action = 'r'
586 else:
595 else:
587 if fcd.isabsent(): # dc: remote picked
596 if fcd.isabsent(): # dc: remote picked
588 action = 'g'
597 action = 'g'
589 elif fco.isabsent(): # cd: local picked
598 elif fco.isabsent(): # cd: local picked
590 if dfile in self.localctx:
599 if dfile in self.localctx:
591 action = 'am'
600 action = 'am'
592 else:
601 else:
593 action = 'a'
602 action = 'a'
594 # else: regular merges (no action necessary)
603 # else: regular merges (no action necessary)
595 self._results[dfile] = r, action
604 self._results[dfile] = r, action
596
605
597 return complete, r
606 return complete, r
598
607
599 def _filectxorabsent(self, hexnode, ctx, f):
608 def _filectxorabsent(self, hexnode, ctx, f):
600 if hexnode == nullhex:
609 if hexnode == nullhex:
601 return filemerge.absentfilectx(ctx, f)
610 return filemerge.absentfilectx(ctx, f)
602 else:
611 else:
603 return ctx[f]
612 return ctx[f]
604
613
605 def preresolve(self, dfile, wctx):
614 def preresolve(self, dfile, wctx):
606 """run premerge process for dfile
615 """run premerge process for dfile
607
616
608 Returns whether the merge is complete, and the exit code."""
617 Returns whether the merge is complete, and the exit code."""
609 return self._resolve(True, dfile, wctx)
618 return self._resolve(True, dfile, wctx)
610
619
611 def resolve(self, dfile, wctx):
620 def resolve(self, dfile, wctx):
612 """run merge process (assuming premerge was run) for dfile
621 """run merge process (assuming premerge was run) for dfile
613
622
614 Returns the exit code of the merge."""
623 Returns the exit code of the merge."""
615 return self._resolve(False, dfile, wctx)[1]
624 return self._resolve(False, dfile, wctx)[1]
616
625
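# --- Illustrative sketch (not part of merge.py) ----------------------------
# How a caller might drive the two-phase API above: preresolve() runs the
# premerge step and resolve() finishes files the premerge did not settle.
# `_resolveall` is an invented helper; `ms` is a mergestate and `wctx` a
# working context, both assumed to exist.
def _resolveall(ms, wctx):
    failed = []
    for f in list(ms.unresolved()):
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            r = ms.resolve(f, wctx)
        if r:
            failed.append(f)
    ms.commit()               # persist the updated state to .hg/merge
    return failed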
617 def counts(self):
626 def counts(self):
618 """return counts for updated, merged and removed files in this
627 """return counts for updated, merged and removed files in this
619 session"""
628 session"""
620 updated, merged, removed = 0, 0, 0
629 updated, merged, removed = 0, 0, 0
621 for r, action in self._results.itervalues():
630 for r, action in self._results.itervalues():
622 if r is None:
631 if r is None:
623 updated += 1
632 updated += 1
624 elif r == 0:
633 elif r == 0:
625 if action == 'r':
634 if action == 'r':
626 removed += 1
635 removed += 1
627 else:
636 else:
628 merged += 1
637 merged += 1
629 return updated, merged, removed
638 return updated, merged, removed
630
639
631 def unresolvedcount(self):
640 def unresolvedcount(self):
632 """get unresolved count for this merge (persistent)"""
641 """get unresolved count for this merge (persistent)"""
633 return len(list(self.unresolved()))
642 return len(list(self.unresolved()))
634
643
635 def actions(self):
644 def actions(self):
636 """return lists of actions to perform on the dirstate"""
645 """return lists of actions to perform on the dirstate"""
637 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
646 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
638 for f, (r, action) in self._results.iteritems():
647 for f, (r, action) in self._results.iteritems():
639 if action is not None:
648 if action is not None:
640 actions[action].append((f, None, "merge result"))
649 actions[action].append((f, None, "merge result"))
641 return actions
650 return actions
642
651
643 def recordactions(self):
652 def recordactions(self):
644 """record remove/add/get actions in the dirstate"""
653 """record remove/add/get actions in the dirstate"""
645 branchmerge = self._repo.dirstate.p2() != nullid
654 branchmerge = self._repo.dirstate.p2() != nullid
646 recordupdates(self._repo, self.actions(), branchmerge)
655 recordupdates(self._repo, self.actions(), branchmerge)
647
656
648 def queueremove(self, f):
657 def queueremove(self, f):
649 """queues a file to be removed from the dirstate
658 """queues a file to be removed from the dirstate
650
659
651 Meant for use by custom merge drivers."""
660 Meant for use by custom merge drivers."""
652 self._results[f] = 0, 'r'
661 self._results[f] = 0, 'r'
653
662
654 def queueadd(self, f):
663 def queueadd(self, f):
655 """queues a file to be added to the dirstate
664 """queues a file to be added to the dirstate
656
665
657 Meant for use by custom merge drivers."""
666 Meant for use by custom merge drivers."""
658 self._results[f] = 0, 'a'
667 self._results[f] = 0, 'a'
659
668
660 def queueget(self, f):
669 def queueget(self, f):
661 """queues a file to be marked modified in the dirstate
670 """queues a file to be marked modified in the dirstate
662
671
663 Meant for use by custom merge drivers."""
672 Meant for use by custom merge drivers."""
664 self._results[f] = 0, 'g'
673 self._results[f] = 0, 'g'
665
674
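# --- Illustrative sketch (not part of merge.py) ----------------------------
# How a custom merge driver might use the queue* helpers above to record
# dirstate actions for files it handled itself. Everything except the
# queue* methods is invented for illustration.
def driverconclude(ms, rewritten, created, superseded):
    for f in rewritten:
        ms.queueget(f)        # rewritten in place -> mark modified
    for f in created:
        ms.queueadd(f)        # new file produced by the driver -> add
    for f in superseded:
        ms.queueremove(f)     # no longer needed -> remove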
666 def _getcheckunknownconfig(repo, section, name):
675 def _getcheckunknownconfig(repo, section, name):
667 config = repo.ui.config(section, name)
676 config = repo.ui.config(section, name)
668 valid = ['abort', 'ignore', 'warn']
677 valid = ['abort', 'ignore', 'warn']
669 if config not in valid:
678 if config not in valid:
670 validstr = ', '.join(["'" + v + "'" for v in valid])
679 validstr = ', '.join(["'" + v + "'" for v in valid])
671 raise error.ConfigError(_("%s.%s not valid "
680 raise error.ConfigError(_("%s.%s not valid "
672 "('%s' is none of %s)")
681 "('%s' is none of %s)")
673 % (section, name, config, validstr))
682 % (section, name, config, validstr))
674 return config
683 return config
675
684
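# --- Illustrative sketch (not part of merge.py) ----------------------------
# The callers below read merge.checkunknown and merge.checkignored through
# this helper; the accepted values are 'abort', 'ignore' and 'warn'. In an
# hgrc that looks like:
#
#   [merge]
#   checkunknown = warn
#   checkignored = abort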
676 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
685 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
677 if wctx.isinmemory():
686 if wctx.isinmemory():
678 # Nothing to do in IMM because nothing in the "working copy" can be an
687 # Nothing to do in IMM because nothing in the "working copy" can be an
679 # unknown file.
688 # unknown file.
680 #
689 #
681 # Note that we should bail out here, not in ``_checkunknownfiles()``,
690 # Note that we should bail out here, not in ``_checkunknownfiles()``,
682 # because that function does other useful work.
691 # because that function does other useful work.
683 return False
692 return False
684
693
685 if f2 is None:
694 if f2 is None:
686 f2 = f
695 f2 = f
687 return (repo.wvfs.audit.check(f)
696 return (repo.wvfs.audit.check(f)
688 and repo.wvfs.isfileorlink(f)
697 and repo.wvfs.isfileorlink(f)
689 and repo.dirstate.normalize(f) not in repo.dirstate
698 and repo.dirstate.normalize(f) not in repo.dirstate
690 and mctx[f2].cmp(wctx[f]))
699 and mctx[f2].cmp(wctx[f]))
691
700
692 class _unknowndirschecker(object):
701 class _unknowndirschecker(object):
693 """
702 """
694 Look for any unknown files or directories that may have a path conflict
703 Look for any unknown files or directories that may have a path conflict
695 with a file. If any path prefix of the file exists as a file or link,
704 with a file. If any path prefix of the file exists as a file or link,
696 then it conflicts. If the file itself is a directory that contains any
705 then it conflicts. If the file itself is a directory that contains any
697 file that is not tracked, then it conflicts.
706 file that is not tracked, then it conflicts.
698
707
699 Returns the shortest path at which a conflict occurs, or None if there is
708 Returns the shortest path at which a conflict occurs, or None if there is
700 no conflict.
709 no conflict.
701 """
710 """
702 def __init__(self):
711 def __init__(self):
703 # A set of paths known to be good. This prevents repeated checking of
712 # A set of paths known to be good. This prevents repeated checking of
704 # dirs. It will be updated with any new dirs that are checked and found
713 # dirs. It will be updated with any new dirs that are checked and found
705 # to be safe.
714 # to be safe.
706 self._unknowndircache = set()
715 self._unknowndircache = set()
707
716
708 # A set of paths that are known to be absent. This prevents repeated
717 # A set of paths that are known to be absent. This prevents repeated
709 # checking of subdirectories that are known not to exist. It will be
718 # checking of subdirectories that are known not to exist. It will be
710 # updated with any new dirs that are checked and found to be absent.
719 # updated with any new dirs that are checked and found to be absent.
711 self._missingdircache = set()
720 self._missingdircache = set()
712
721
713 def __call__(self, repo, wctx, f):
722 def __call__(self, repo, wctx, f):
714 if wctx.isinmemory():
723 if wctx.isinmemory():
715 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
724 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
716 return False
725 return False
717
726
718 # Check for path prefixes that exist as unknown files.
727 # Check for path prefixes that exist as unknown files.
719 for p in reversed(list(util.finddirs(f))):
728 for p in reversed(list(util.finddirs(f))):
720 if p in self._missingdircache:
729 if p in self._missingdircache:
721 return
730 return
722 if p in self._unknowndircache:
731 if p in self._unknowndircache:
723 continue
732 continue
724 if repo.wvfs.audit.check(p):
733 if repo.wvfs.audit.check(p):
725 if (repo.wvfs.isfileorlink(p)
734 if (repo.wvfs.isfileorlink(p)
726 and repo.dirstate.normalize(p) not in repo.dirstate):
735 and repo.dirstate.normalize(p) not in repo.dirstate):
727 return p
736 return p
728 if not repo.wvfs.lexists(p):
737 if not repo.wvfs.lexists(p):
729 self._missingdircache.add(p)
738 self._missingdircache.add(p)
730 return
739 return
731 self._unknowndircache.add(p)
740 self._unknowndircache.add(p)
732
741
733 # Check if the file conflicts with a directory containing unknown files.
742 # Check if the file conflicts with a directory containing unknown files.
734 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
743 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
735 # Does the directory contain any files that are not in the dirstate?
744 # Does the directory contain any files that are not in the dirstate?
736 for p, dirs, files in repo.wvfs.walk(f):
745 for p, dirs, files in repo.wvfs.walk(f):
737 for fn in files:
746 for fn in files:
738 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
747 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
739 relf = repo.dirstate.normalize(relf, isknown=True)
748 relf = repo.dirstate.normalize(relf, isknown=True)
740 if relf not in repo.dirstate:
749 if relf not in repo.dirstate:
741 return f
750 return f
742 return None
751 return None
743
752
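# --- Illustrative sketch (not part of merge.py) ----------------------------
# Intended use of the checker above (see _checkunknownfiles below): a single
# instance is shared across calls so its caches persist. `repo`, `wctx` and
# the paths are assumed or made up.
checkunknowndirs = _unknowndirschecker()
for f in ('newdir/file.txt', 'other/file.txt'):
    conflict = checkunknowndirs(repo, wctx, f)
    if conflict is not None:
        repo.ui.warn('path conflict at %s\n' % conflict)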
744 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
753 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
745 """
754 """
746 Considers any actions that care about the presence of conflicting unknown
755 Considers any actions that care about the presence of conflicting unknown
747 files. For some actions, the result is to abort; for others, it is to
756 files. For some actions, the result is to abort; for others, it is to
748 choose a different action.
757 choose a different action.
749 """
758 """
750 fileconflicts = set()
759 fileconflicts = set()
751 pathconflicts = set()
760 pathconflicts = set()
752 warnconflicts = set()
761 warnconflicts = set()
753 abortconflicts = set()
762 abortconflicts = set()
754 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
763 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
755 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
764 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
756 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
765 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
757 if not force:
766 if not force:
758 def collectconflicts(conflicts, config):
767 def collectconflicts(conflicts, config):
759 if config == 'abort':
768 if config == 'abort':
760 abortconflicts.update(conflicts)
769 abortconflicts.update(conflicts)
761 elif config == 'warn':
770 elif config == 'warn':
762 warnconflicts.update(conflicts)
771 warnconflicts.update(conflicts)
763
772
764 checkunknowndirs = _unknowndirschecker()
773 checkunknowndirs = _unknowndirschecker()
765 for f, (m, args, msg) in actions.iteritems():
774 for f, (m, args, msg) in actions.iteritems():
766 if m in ('c', 'dc'):
775 if m in ('c', 'dc'):
767 if _checkunknownfile(repo, wctx, mctx, f):
776 if _checkunknownfile(repo, wctx, mctx, f):
768 fileconflicts.add(f)
777 fileconflicts.add(f)
769 elif pathconfig and f not in wctx:
778 elif pathconfig and f not in wctx:
770 path = checkunknowndirs(repo, wctx, f)
779 path = checkunknowndirs(repo, wctx, f)
771 if path is not None:
780 if path is not None:
772 pathconflicts.add(path)
781 pathconflicts.add(path)
773 elif m == 'dg':
782 elif m == 'dg':
774 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
783 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
775 fileconflicts.add(f)
784 fileconflicts.add(f)
776
785
777 allconflicts = fileconflicts | pathconflicts
786 allconflicts = fileconflicts | pathconflicts
778 ignoredconflicts = set([c for c in allconflicts
787 ignoredconflicts = set([c for c in allconflicts
779 if repo.dirstate._ignore(c)])
788 if repo.dirstate._ignore(c)])
780 unknownconflicts = allconflicts - ignoredconflicts
789 unknownconflicts = allconflicts - ignoredconflicts
781 collectconflicts(ignoredconflicts, ignoredconfig)
790 collectconflicts(ignoredconflicts, ignoredconfig)
782 collectconflicts(unknownconflicts, unknownconfig)
791 collectconflicts(unknownconflicts, unknownconfig)
783 else:
792 else:
784 for f, (m, args, msg) in actions.iteritems():
793 for f, (m, args, msg) in actions.iteritems():
785 if m == 'cm':
794 if m == 'cm':
786 fl2, anc = args
795 fl2, anc = args
787 different = _checkunknownfile(repo, wctx, mctx, f)
796 different = _checkunknownfile(repo, wctx, mctx, f)
788 if repo.dirstate._ignore(f):
797 if repo.dirstate._ignore(f):
789 config = ignoredconfig
798 config = ignoredconfig
790 else:
799 else:
791 config = unknownconfig
800 config = unknownconfig
792
801
793 # The behavior when force is True is described by this table:
802 # The behavior when force is True is described by this table:
794 # config different mergeforce | action backup
803 # config different mergeforce | action backup
795 # * n * | get n
804 # * n * | get n
796 # * y y | merge -
805 # * y y | merge -
797 # abort y n | merge - (1)
806 # abort y n | merge - (1)
798 # warn y n | warn + get y
807 # warn y n | warn + get y
799 # ignore y n | get y
808 # ignore y n | get y
800 #
809 #
801 # (1) this is probably the wrong behavior here -- we should
810 # (1) this is probably the wrong behavior here -- we should
802 # probably abort, but some actions like rebases currently
811 # probably abort, but some actions like rebases currently
803 # don't like an abort happening in the middle of
812 # don't like an abort happening in the middle of
804 # merge.update.
813 # merge.update.
805 if not different:
814 if not different:
806 actions[f] = ('g', (fl2, False), "remote created")
815 actions[f] = ('g', (fl2, False), "remote created")
807 elif mergeforce or config == 'abort':
816 elif mergeforce or config == 'abort':
808 actions[f] = ('m', (f, f, None, False, anc),
817 actions[f] = ('m', (f, f, None, False, anc),
809 "remote differs from untracked local")
818 "remote differs from untracked local")
810 elif config == 'abort':
819 elif config == 'abort':
811 abortconflicts.add(f)
820 abortconflicts.add(f)
812 else:
821 else:
813 if config == 'warn':
822 if config == 'warn':
814 warnconflicts.add(f)
823 warnconflicts.add(f)
815 actions[f] = ('g', (fl2, True), "remote created")
824 actions[f] = ('g', (fl2, True), "remote created")
816
825
817 for f in sorted(abortconflicts):
826 for f in sorted(abortconflicts):
818 warn = repo.ui.warn
827 warn = repo.ui.warn
819 if f in pathconflicts:
828 if f in pathconflicts:
820 if repo.wvfs.isfileorlink(f):
829 if repo.wvfs.isfileorlink(f):
821 warn(_("%s: untracked file conflicts with directory\n") % f)
830 warn(_("%s: untracked file conflicts with directory\n") % f)
822 else:
831 else:
823 warn(_("%s: untracked directory conflicts with file\n") % f)
832 warn(_("%s: untracked directory conflicts with file\n") % f)
824 else:
833 else:
825 warn(_("%s: untracked file differs\n") % f)
834 warn(_("%s: untracked file differs\n") % f)
826 if abortconflicts:
835 if abortconflicts:
827 raise error.Abort(_("untracked files in working directory "
836 raise error.Abort(_("untracked files in working directory "
828 "differ from files in requested revision"))
837 "differ from files in requested revision"))
829
838
830 for f in sorted(warnconflicts):
839 for f in sorted(warnconflicts):
831 if repo.wvfs.isfileorlink(f):
840 if repo.wvfs.isfileorlink(f):
832 repo.ui.warn(_("%s: replacing untracked file\n") % f)
841 repo.ui.warn(_("%s: replacing untracked file\n") % f)
833 else:
842 else:
834 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
843 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
835
844
836 for f, (m, args, msg) in actions.iteritems():
845 for f, (m, args, msg) in actions.iteritems():
837 if m == 'c':
846 if m == 'c':
838 backup = (f in fileconflicts or f in pathconflicts or
847 backup = (f in fileconflicts or f in pathconflicts or
839 any(p in pathconflicts for p in util.finddirs(f)))
848 any(p in pathconflicts for p in util.finddirs(f)))
840 flags, = args
849 flags, = args
841 actions[f] = ('g', (flags, backup), msg)
850 actions[f] = ('g', (flags, backup), msg)
842
851
843 def _forgetremoved(wctx, mctx, branchmerge):
852 def _forgetremoved(wctx, mctx, branchmerge):
844 """
853 """
845 Forget removed files
854 Forget removed files
846
855
847 If we're jumping between revisions (as opposed to merging), and if
856 If we're jumping between revisions (as opposed to merging), and if
848 neither the working directory nor the target rev has the file,
857 neither the working directory nor the target rev has the file,
849 then we need to remove it from the dirstate, to prevent the
858 then we need to remove it from the dirstate, to prevent the
850 dirstate from listing the file when it is no longer in the
859 dirstate from listing the file when it is no longer in the
851 manifest.
860 manifest.
852
861
853 If we're merging, and the other revision has removed a file
862 If we're merging, and the other revision has removed a file
854 that is not present in the working directory, we need to mark it
863 that is not present in the working directory, we need to mark it
855 as removed.
864 as removed.
856 """
865 """
857
866
858 actions = {}
867 actions = {}
859 m = 'f'
868 m = 'f'
860 if branchmerge:
869 if branchmerge:
861 m = 'r'
870 m = 'r'
862 for f in wctx.deleted():
871 for f in wctx.deleted():
863 if f not in mctx:
872 if f not in mctx:
864 actions[f] = m, None, "forget deleted"
873 actions[f] = m, None, "forget deleted"
865
874
866 if not branchmerge:
875 if not branchmerge:
867 for f in wctx.removed():
876 for f in wctx.removed():
868 if f not in mctx:
877 if f not in mctx:
869 actions[f] = 'f', None, "forget removed"
878 actions[f] = 'f', None, "forget removed"
870
879
871 return actions
880 return actions
872
881
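
A rough standalone illustration of the forget/remove decision described in the docstring above, with plain sets standing in for the working and target contexts (not part of merge.py):

def forgetremoved_sketch(deleted, removed, target_manifest, branchmerge):
    """Mimic _forgetremoved with plain collections instead of contexts."""
    actions = {}
    m = 'r' if branchmerge else 'f'
    for f in deleted:
        if f not in target_manifest:
            actions[f] = (m, None, "forget deleted")
    if not branchmerge:
        for f in removed:
            if f not in target_manifest:
                actions[f] = ('f', None, "forget removed")
    return actions

# e.g. jumping to a revision that no longer contains 'obsolete.txt':
assert forgetremoved_sketch({'obsolete.txt'}, set(), {'keep.txt'},
                            branchmerge=False) == {
    'obsolete.txt': ('f', None, "forget deleted")}
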
873 def _checkcollision(repo, wmf, actions):
882 def _checkcollision(repo, wmf, actions):
874 # build provisional merged manifest up
883 # build provisional merged manifest up
875 pmmf = set(wmf)
884 pmmf = set(wmf)
876
885
877 if actions:
886 if actions:
878 # k, dr, e and rd are no-op
887 # k, dr, e and rd are no-op
879 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
888 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
880 for f, args, msg in actions[m]:
889 for f, args, msg in actions[m]:
881 pmmf.add(f)
890 pmmf.add(f)
882 for f, args, msg in actions['r']:
891 for f, args, msg in actions['r']:
883 pmmf.discard(f)
892 pmmf.discard(f)
884 for f, args, msg in actions['dm']:
893 for f, args, msg in actions['dm']:
885 f2, flags = args
894 f2, flags = args
886 pmmf.discard(f2)
895 pmmf.discard(f2)
887 pmmf.add(f)
896 pmmf.add(f)
888 for f, args, msg in actions['dg']:
897 for f, args, msg in actions['dg']:
889 pmmf.add(f)
898 pmmf.add(f)
890 for f, args, msg in actions['m']:
899 for f, args, msg in actions['m']:
891 f1, f2, fa, move, anc = args
900 f1, f2, fa, move, anc = args
892 if move:
901 if move:
893 pmmf.discard(f1)
902 pmmf.discard(f1)
894 pmmf.add(f)
903 pmmf.add(f)
895
904
896 # check case-folding collision in provisional merged manifest
905 # check case-folding collision in provisional merged manifest
897 foldmap = {}
906 foldmap = {}
898 for f in pmmf:
907 for f in pmmf:
899 fold = util.normcase(f)
908 fold = util.normcase(f)
900 if fold in foldmap:
909 if fold in foldmap:
901 raise error.Abort(_("case-folding collision between %s and %s")
910 raise error.Abort(_("case-folding collision between %s and %s")
902 % (f, foldmap[fold]))
911 % (f, foldmap[fold]))
903 foldmap[fold] = f
912 foldmap[fold] = f
904
913
905 # check case-folding of directories
914 # check case-folding of directories
906 foldprefix = unfoldprefix = lastfull = ''
915 foldprefix = unfoldprefix = lastfull = ''
907 for fold, f in sorted(foldmap.items()):
916 for fold, f in sorted(foldmap.items()):
908 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
917 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
909 # the folded prefix matches but actual casing is different
918 # the folded prefix matches but actual casing is different
910 raise error.Abort(_("case-folding collision between "
919 raise error.Abort(_("case-folding collision between "
911 "%s and directory of %s") % (lastfull, f))
920 "%s and directory of %s") % (lastfull, f))
912 foldprefix = fold + '/'
921 foldprefix = fold + '/'
913 unfoldprefix = f + '/'
922 unfoldprefix = f + '/'
914 lastfull = f
923 lastfull = f
915
924
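
A minimal, self-contained sketch of the case-folding collision check above, with util.normcase approximated by str.lower, so it only illustrates the idea rather than the real platform-aware behaviour:

def find_case_collision(paths):
    """Return a colliding pair of paths, or None, folding case with lower()."""
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()  # stand-in for util.normcase
        if fold in foldmap:
            return foldmap[fold], f
        foldmap[fold] = f
    return None

assert find_case_collision({'README', 'readme', 'src/a.py'}) == ('README', 'readme')
assert find_case_collision({'a.py', 'b.py'}) is None
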
916 def driverpreprocess(repo, ms, wctx, labels=None):
925 def driverpreprocess(repo, ms, wctx, labels=None):
917 """run the preprocess step of the merge driver, if any
926 """run the preprocess step of the merge driver, if any
918
927
919 This is currently not implemented -- it's an extension point."""
928 This is currently not implemented -- it's an extension point."""
920 return True
929 return True
921
930
922 def driverconclude(repo, ms, wctx, labels=None):
931 def driverconclude(repo, ms, wctx, labels=None):
923 """run the conclude step of the merge driver, if any
932 """run the conclude step of the merge driver, if any
924
933
925 This is currently not implemented -- it's an extension point."""
934 This is currently not implemented -- it's an extension point."""
926 return True
935 return True
927
936
928 def _filesindirs(repo, manifest, dirs):
937 def _filesindirs(repo, manifest, dirs):
929 """
938 """
 930 Generator that yields (file, directory) pairs for every file in the manifest
 939 Generator that yields (file, directory) pairs for every file in the manifest
 931 that is found inside one of the directories listed in dirs; the second element
 940 that is found inside one of the directories listed in dirs; the second element
 932 of each pair is the directory the file was found in.
 941 of each pair is the directory the file was found in.
933 """
942 """
934 for f in manifest:
943 for f in manifest:
935 for p in util.finddirs(f):
944 for p in util.finddirs(f):
936 if p in dirs:
945 if p in dirs:
937 yield f, p
946 yield f, p
938 break
947 break
939
948
940 def checkpathconflicts(repo, wctx, mctx, actions):
949 def checkpathconflicts(repo, wctx, mctx, actions):
941 """
950 """
942 Check if any actions introduce path conflicts in the repository, updating
951 Check if any actions introduce path conflicts in the repository, updating
943 actions to record or handle the path conflict accordingly.
952 actions to record or handle the path conflict accordingly.
944 """
953 """
945 mf = wctx.manifest()
954 mf = wctx.manifest()
946
955
947 # The set of local files that conflict with a remote directory.
956 # The set of local files that conflict with a remote directory.
948 localconflicts = set()
957 localconflicts = set()
949
958
950 # The set of directories that conflict with a remote file, and so may cause
959 # The set of directories that conflict with a remote file, and so may cause
951 # conflicts if they still contain any files after the merge.
960 # conflicts if they still contain any files after the merge.
952 remoteconflicts = set()
961 remoteconflicts = set()
953
962
 954 # The set of paths that appear as both a file and a directory in the
 963 # The set of paths that appear as both a file and a directory in the
 955 # remote manifest. Such paths indicate an invalid remote manifest, which
 964 # remote manifest. Such paths indicate an invalid remote manifest, which
 956 # we cannot cleanly update to.
 965 # we cannot cleanly update to.
957 invalidconflicts = set()
966 invalidconflicts = set()
958
967
959 # The set of directories that contain files that are being created.
968 # The set of directories that contain files that are being created.
960 createdfiledirs = set()
969 createdfiledirs = set()
961
970
962 # The set of files deleted by all the actions.
971 # The set of files deleted by all the actions.
963 deletedfiles = set()
972 deletedfiles = set()
964
973
965 for f, (m, args, msg) in actions.items():
974 for f, (m, args, msg) in actions.items():
966 if m in ('c', 'dc', 'm', 'cm'):
975 if m in ('c', 'dc', 'm', 'cm'):
967 # This action may create a new local file.
976 # This action may create a new local file.
968 createdfiledirs.update(util.finddirs(f))
977 createdfiledirs.update(util.finddirs(f))
969 if mf.hasdir(f):
978 if mf.hasdir(f):
970 # The file aliases a local directory. This might be ok if all
979 # The file aliases a local directory. This might be ok if all
971 # the files in the local directory are being deleted. This
980 # the files in the local directory are being deleted. This
972 # will be checked once we know what all the deleted files are.
981 # will be checked once we know what all the deleted files are.
973 remoteconflicts.add(f)
982 remoteconflicts.add(f)
974 # Track the names of all deleted files.
983 # Track the names of all deleted files.
975 if m == 'r':
984 if m == 'r':
976 deletedfiles.add(f)
985 deletedfiles.add(f)
977 if m == 'm':
986 if m == 'm':
978 f1, f2, fa, move, anc = args
987 f1, f2, fa, move, anc = args
979 if move:
988 if move:
980 deletedfiles.add(f1)
989 deletedfiles.add(f1)
981 if m == 'dm':
990 if m == 'dm':
982 f2, flags = args
991 f2, flags = args
983 deletedfiles.add(f2)
992 deletedfiles.add(f2)
984
993
985 # Check all directories that contain created files for path conflicts.
994 # Check all directories that contain created files for path conflicts.
986 for p in createdfiledirs:
995 for p in createdfiledirs:
987 if p in mf:
996 if p in mf:
988 if p in mctx:
997 if p in mctx:
989 # A file is in a directory which aliases both a local
998 # A file is in a directory which aliases both a local
990 # and a remote file. This is an internal inconsistency
999 # and a remote file. This is an internal inconsistency
991 # within the remote manifest.
1000 # within the remote manifest.
992 invalidconflicts.add(p)
1001 invalidconflicts.add(p)
993 else:
1002 else:
994 # A file is in a directory which aliases a local file.
1003 # A file is in a directory which aliases a local file.
995 # We will need to rename the local file.
1004 # We will need to rename the local file.
996 localconflicts.add(p)
1005 localconflicts.add(p)
997 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
1006 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
998 # The file is in a directory which aliases a remote file.
1007 # The file is in a directory which aliases a remote file.
999 # This is an internal inconsistency within the remote
1008 # This is an internal inconsistency within the remote
1000 # manifest.
1009 # manifest.
1001 invalidconflicts.add(p)
1010 invalidconflicts.add(p)
1002
1011
1003 # Rename all local conflicting files that have not been deleted.
1012 # Rename all local conflicting files that have not been deleted.
1004 for p in localconflicts:
1013 for p in localconflicts:
1005 if p not in deletedfiles:
1014 if p not in deletedfiles:
1006 ctxname = bytes(wctx).rstrip('+')
1015 ctxname = bytes(wctx).rstrip('+')
1007 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1016 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1008 actions[pnew] = ('pr', (p,), "local path conflict")
1017 actions[pnew] = ('pr', (p,), "local path conflict")
1009 actions[p] = ('p', (pnew, 'l'), "path conflict")
1018 actions[p] = ('p', (pnew, 'l'), "path conflict")
1010
1019
1011 if remoteconflicts:
1020 if remoteconflicts:
1012 # Check if all files in the conflicting directories have been removed.
1021 # Check if all files in the conflicting directories have been removed.
1013 ctxname = bytes(mctx).rstrip('+')
1022 ctxname = bytes(mctx).rstrip('+')
1014 for f, p in _filesindirs(repo, mf, remoteconflicts):
1023 for f, p in _filesindirs(repo, mf, remoteconflicts):
1015 if f not in deletedfiles:
1024 if f not in deletedfiles:
1016 m, args, msg = actions[p]
1025 m, args, msg = actions[p]
1017 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1026 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1018 if m in ('dc', 'm'):
1027 if m in ('dc', 'm'):
1019 # Action was merge, just update target.
1028 # Action was merge, just update target.
1020 actions[pnew] = (m, args, msg)
1029 actions[pnew] = (m, args, msg)
1021 else:
1030 else:
1022 # Action was create, change to renamed get action.
1031 # Action was create, change to renamed get action.
1023 fl = args[0]
1032 fl = args[0]
1024 actions[pnew] = ('dg', (p, fl), "remote path conflict")
1033 actions[pnew] = ('dg', (p, fl), "remote path conflict")
1025 actions[p] = ('p', (pnew, 'r'), "path conflict")
1034 actions[p] = ('p', (pnew, 'r'), "path conflict")
1026 remoteconflicts.remove(p)
1035 remoteconflicts.remove(p)
1027 break
1036 break
1028
1037
1029 if invalidconflicts:
1038 if invalidconflicts:
1030 for p in invalidconflicts:
1039 for p in invalidconflicts:
1031 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1040 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1032 raise error.Abort(_("destination manifest contains path conflicts"))
1041 raise error.Abort(_("destination manifest contains path conflicts"))
1033
1042
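
To make the conflict classes above concrete, here is a small standalone sketch of how a remotely created file can collide with files that survive under a local directory of the same name. Plain sets stand in for manifests, and parent_dirs is a simplified, hypothetical stand-in for util.finddirs:

import posixpath

def parent_dirs(path):
    """Yield 'a/b' then 'a' for 'a/b/c' (simplified util.finddirs)."""
    path = posixpath.dirname(path)
    while path:
        yield path
        path = posixpath.dirname(path)

def classify_remote_file(f, local_files, deleted):
    """Does creating remote file f conflict with surviving local files under f/?"""
    survivors = [lf for lf in local_files
                 if f in parent_dirs(lf) and lf not in deleted]
    return 'remote path conflict' if survivors else 'ok'

assert classify_remote_file('a', {'a/x', 'b'}, deleted=set()) == 'remote path conflict'
assert classify_remote_file('a', {'a/x', 'b'}, deleted={'a/x'}) == 'ok'
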
1034 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1043 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1035 acceptremote, followcopies, forcefulldiff=False):
1044 acceptremote, followcopies, forcefulldiff=False):
1036 """
1045 """
1037 Merge wctx and p2 with ancestor pa and generate merge action list
1046 Merge wctx and p2 with ancestor pa and generate merge action list
1038
1047
1039 branchmerge and force are as passed in to update
1048 branchmerge and force are as passed in to update
1040 matcher = matcher to filter file lists
1049 matcher = matcher to filter file lists
1041 acceptremote = accept the incoming changes without prompting
1050 acceptremote = accept the incoming changes without prompting
1042 """
1051 """
1043 if matcher is not None and matcher.always():
1052 if matcher is not None and matcher.always():
1044 matcher = None
1053 matcher = None
1045
1054
1046 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1055 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1047
1056
1048 # manifests fetched in order are going to be faster, so prime the caches
1057 # manifests fetched in order are going to be faster, so prime the caches
1049 [x.manifest() for x in
1058 [x.manifest() for x in
1050 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1059 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1051
1060
1052 if followcopies:
1061 if followcopies:
1053 ret = copies.mergecopies(repo, wctx, p2, pa)
1062 ret = copies.mergecopies(repo, wctx, p2, pa)
1054 copy, movewithdir, diverge, renamedelete, dirmove = ret
1063 copy, movewithdir, diverge, renamedelete, dirmove = ret
1055
1064
1056 boolbm = pycompat.bytestr(bool(branchmerge))
1065 boolbm = pycompat.bytestr(bool(branchmerge))
1057 boolf = pycompat.bytestr(bool(force))
1066 boolf = pycompat.bytestr(bool(force))
1058 boolm = pycompat.bytestr(bool(matcher))
1067 boolm = pycompat.bytestr(bool(matcher))
1059 repo.ui.note(_("resolving manifests\n"))
1068 repo.ui.note(_("resolving manifests\n"))
1060 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1069 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1061 % (boolbm, boolf, boolm))
1070 % (boolbm, boolf, boolm))
1062 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1071 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1063
1072
1064 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1073 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1065 copied = set(copy.values())
1074 copied = set(copy.values())
1066 copied.update(movewithdir.values())
1075 copied.update(movewithdir.values())
1067
1076
1068 if '.hgsubstate' in m1:
1077 if '.hgsubstate' in m1:
1069 # check whether sub state is modified
1078 # check whether sub state is modified
1070 if any(wctx.sub(s).dirty() for s in wctx.substate):
1079 if any(wctx.sub(s).dirty() for s in wctx.substate):
1071 m1['.hgsubstate'] = modifiednodeid
1080 m1['.hgsubstate'] = modifiednodeid
1072
1081
1073 # Don't use m2-vs-ma optimization if:
1082 # Don't use m2-vs-ma optimization if:
1074 # - ma is the same as m1 or m2, which we're just going to diff again later
1083 # - ma is the same as m1 or m2, which we're just going to diff again later
1075 # - The caller specifically asks for a full diff, which is useful during bid
1084 # - The caller specifically asks for a full diff, which is useful during bid
1076 # merge.
1085 # merge.
1077 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1086 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1078 # Identify which files are relevant to the merge, so we can limit the
1087 # Identify which files are relevant to the merge, so we can limit the
1079 # total m1-vs-m2 diff to just those files. This has significant
1088 # total m1-vs-m2 diff to just those files. This has significant
1080 # performance benefits in large repositories.
1089 # performance benefits in large repositories.
1081 relevantfiles = set(ma.diff(m2).keys())
1090 relevantfiles = set(ma.diff(m2).keys())
1082
1091
1083 # For copied and moved files, we need to add the source file too.
1092 # For copied and moved files, we need to add the source file too.
1084 for copykey, copyvalue in copy.iteritems():
1093 for copykey, copyvalue in copy.iteritems():
1085 if copyvalue in relevantfiles:
1094 if copyvalue in relevantfiles:
1086 relevantfiles.add(copykey)
1095 relevantfiles.add(copykey)
1087 for movedirkey in movewithdir:
1096 for movedirkey in movewithdir:
1088 relevantfiles.add(movedirkey)
1097 relevantfiles.add(movedirkey)
1089 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1098 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1090 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1099 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1091
1100
1092 diff = m1.diff(m2, match=matcher)
1101 diff = m1.diff(m2, match=matcher)
1093
1102
1094 if matcher is None:
1103 if matcher is None:
1095 matcher = matchmod.always('', '')
1104 matcher = matchmod.always('', '')
1096
1105
1097 actions = {}
1106 actions = {}
1098 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1107 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1099 if n1 and n2: # file exists on both local and remote side
1108 if n1 and n2: # file exists on both local and remote side
1100 if f not in ma:
1109 if f not in ma:
1101 fa = copy.get(f, None)
1110 fa = copy.get(f, None)
1102 if fa is not None:
1111 if fa is not None:
1103 actions[f] = ('m', (f, f, fa, False, pa.node()),
1112 actions[f] = ('m', (f, f, fa, False, pa.node()),
1104 "both renamed from " + fa)
1113 "both renamed from " + fa)
1105 else:
1114 else:
1106 actions[f] = ('m', (f, f, None, False, pa.node()),
1115 actions[f] = ('m', (f, f, None, False, pa.node()),
1107 "both created")
1116 "both created")
1108 else:
1117 else:
1109 a = ma[f]
1118 a = ma[f]
1110 fla = ma.flags(f)
1119 fla = ma.flags(f)
1111 nol = 'l' not in fl1 + fl2 + fla
1120 nol = 'l' not in fl1 + fl2 + fla
1112 if n2 == a and fl2 == fla:
1121 if n2 == a and fl2 == fla:
1113 actions[f] = ('k', (), "remote unchanged")
1122 actions[f] = ('k', (), "remote unchanged")
1114 elif n1 == a and fl1 == fla: # local unchanged - use remote
1123 elif n1 == a and fl1 == fla: # local unchanged - use remote
1115 if n1 == n2: # optimization: keep local content
1124 if n1 == n2: # optimization: keep local content
1116 actions[f] = ('e', (fl2,), "update permissions")
1125 actions[f] = ('e', (fl2,), "update permissions")
1117 else:
1126 else:
1118 actions[f] = ('g', (fl2, False), "remote is newer")
1127 actions[f] = ('g', (fl2, False), "remote is newer")
1119 elif nol and n2 == a: # remote only changed 'x'
1128 elif nol and n2 == a: # remote only changed 'x'
1120 actions[f] = ('e', (fl2,), "update permissions")
1129 actions[f] = ('e', (fl2,), "update permissions")
1121 elif nol and n1 == a: # local only changed 'x'
1130 elif nol and n1 == a: # local only changed 'x'
1122 actions[f] = ('g', (fl1, False), "remote is newer")
1131 actions[f] = ('g', (fl1, False), "remote is newer")
1123 else: # both changed something
1132 else: # both changed something
1124 actions[f] = ('m', (f, f, f, False, pa.node()),
1133 actions[f] = ('m', (f, f, f, False, pa.node()),
1125 "versions differ")
1134 "versions differ")
1126 elif n1: # file exists only on local side
1135 elif n1: # file exists only on local side
1127 if f in copied:
1136 if f in copied:
1128 pass # we'll deal with it on m2 side
1137 pass # we'll deal with it on m2 side
1129 elif f in movewithdir: # directory rename, move local
1138 elif f in movewithdir: # directory rename, move local
1130 f2 = movewithdir[f]
1139 f2 = movewithdir[f]
1131 if f2 in m2:
1140 if f2 in m2:
1132 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1141 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1133 "remote directory rename, both created")
1142 "remote directory rename, both created")
1134 else:
1143 else:
1135 actions[f2] = ('dm', (f, fl1),
1144 actions[f2] = ('dm', (f, fl1),
1136 "remote directory rename - move from " + f)
1145 "remote directory rename - move from " + f)
1137 elif f in copy:
1146 elif f in copy:
1138 f2 = copy[f]
1147 f2 = copy[f]
1139 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1148 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1140 "local copied/moved from " + f2)
1149 "local copied/moved from " + f2)
1141 elif f in ma: # clean, a different, no remote
1150 elif f in ma: # clean, a different, no remote
1142 if n1 != ma[f]:
1151 if n1 != ma[f]:
1143 if acceptremote:
1152 if acceptremote:
1144 actions[f] = ('r', None, "remote delete")
1153 actions[f] = ('r', None, "remote delete")
1145 else:
1154 else:
1146 actions[f] = ('cd', (f, None, f, False, pa.node()),
1155 actions[f] = ('cd', (f, None, f, False, pa.node()),
1147 "prompt changed/deleted")
1156 "prompt changed/deleted")
1148 elif n1 == addednodeid:
1157 elif n1 == addednodeid:
1149 # This extra 'a' is added by working copy manifest to mark
1158 # This extra 'a' is added by working copy manifest to mark
1150 # the file as locally added. We should forget it instead of
1159 # the file as locally added. We should forget it instead of
1151 # deleting it.
1160 # deleting it.
1152 actions[f] = ('f', None, "remote deleted")
1161 actions[f] = ('f', None, "remote deleted")
1153 else:
1162 else:
1154 actions[f] = ('r', None, "other deleted")
1163 actions[f] = ('r', None, "other deleted")
1155 elif n2: # file exists only on remote side
1164 elif n2: # file exists only on remote side
1156 if f in copied:
1165 if f in copied:
1157 pass # we'll deal with it on m1 side
1166 pass # we'll deal with it on m1 side
1158 elif f in movewithdir:
1167 elif f in movewithdir:
1159 f2 = movewithdir[f]
1168 f2 = movewithdir[f]
1160 if f2 in m1:
1169 if f2 in m1:
1161 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1170 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1162 "local directory rename, both created")
1171 "local directory rename, both created")
1163 else:
1172 else:
1164 actions[f2] = ('dg', (f, fl2),
1173 actions[f2] = ('dg', (f, fl2),
1165 "local directory rename - get from " + f)
1174 "local directory rename - get from " + f)
1166 elif f in copy:
1175 elif f in copy:
1167 f2 = copy[f]
1176 f2 = copy[f]
1168 if f2 in m2:
1177 if f2 in m2:
1169 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1178 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1170 "remote copied from " + f2)
1179 "remote copied from " + f2)
1171 else:
1180 else:
1172 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1181 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1173 "remote moved from " + f2)
1182 "remote moved from " + f2)
1174 elif f not in ma:
1183 elif f not in ma:
1175 # local unknown, remote created: the logic is described by the
1184 # local unknown, remote created: the logic is described by the
1176 # following table:
1185 # following table:
1177 #
1186 #
1178 # force branchmerge different | action
1187 # force branchmerge different | action
1179 # n * * | create
1188 # n * * | create
1180 # y n * | create
1189 # y n * | create
1181 # y y n | create
1190 # y y n | create
1182 # y y y | merge
1191 # y y y | merge
1183 #
1192 #
1184 # Checking whether the files are different is expensive, so we
1193 # Checking whether the files are different is expensive, so we
1185 # don't do that when we can avoid it.
1194 # don't do that when we can avoid it.
1186 if not force:
1195 if not force:
1187 actions[f] = ('c', (fl2,), "remote created")
1196 actions[f] = ('c', (fl2,), "remote created")
1188 elif not branchmerge:
1197 elif not branchmerge:
1189 actions[f] = ('c', (fl2,), "remote created")
1198 actions[f] = ('c', (fl2,), "remote created")
1190 else:
1199 else:
1191 actions[f] = ('cm', (fl2, pa.node()),
1200 actions[f] = ('cm', (fl2, pa.node()),
1192 "remote created, get or merge")
1201 "remote created, get or merge")
1193 elif n2 != ma[f]:
1202 elif n2 != ma[f]:
1194 df = None
1203 df = None
1195 for d in dirmove:
1204 for d in dirmove:
1196 if f.startswith(d):
1205 if f.startswith(d):
1197 # new file added in a directory that was moved
1206 # new file added in a directory that was moved
1198 df = dirmove[d] + f[len(d):]
1207 df = dirmove[d] + f[len(d):]
1199 break
1208 break
1200 if df is not None and df in m1:
1209 if df is not None and df in m1:
1201 actions[df] = ('m', (df, f, f, False, pa.node()),
1210 actions[df] = ('m', (df, f, f, False, pa.node()),
1202 "local directory rename - respect move from " + f)
1211 "local directory rename - respect move from " + f)
1203 elif acceptremote:
1212 elif acceptremote:
1204 actions[f] = ('c', (fl2,), "remote recreating")
1213 actions[f] = ('c', (fl2,), "remote recreating")
1205 else:
1214 else:
1206 actions[f] = ('dc', (None, f, f, False, pa.node()),
1215 actions[f] = ('dc', (None, f, f, False, pa.node()),
1207 "prompt deleted/changed")
1216 "prompt deleted/changed")
1208
1217
1209 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1218 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1210 # If we are merging, look for path conflicts.
1219 # If we are merging, look for path conflicts.
1211 checkpathconflicts(repo, wctx, p2, actions)
1220 checkpathconflicts(repo, wctx, p2, actions)
1212
1221
1213 return actions, diverge, renamedelete
1222 return actions, diverge, renamedelete
1214
1223
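
The force/branchmerge/different table in the 'local unknown, remote created' branch above reads as a small decision function. The sketch below only restates the table; the real code defers the expensive 'different' check by emitting the combined 'cm' action instead:

def unknown_file_outcome(force, branchmerge, different):
    """Restate the decision table for a file unknown locally, created remotely."""
    if force and branchmerge and different:
        return 'merge'
    return 'create'

# The four rows of the table:
assert unknown_file_outcome(False, True, True) == 'create'
assert unknown_file_outcome(True, False, True) == 'create'
assert unknown_file_outcome(True, True, False) == 'create'
assert unknown_file_outcome(True, True, True) == 'merge'
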
1215 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1224 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1216 """Resolves false conflicts where the nodeid changed but the content
1225 """Resolves false conflicts where the nodeid changed but the content
1217 remained the same."""
1226 remained the same."""
1218 # We force a copy of actions.items() because we're going to mutate
1227 # We force a copy of actions.items() because we're going to mutate
1219 # actions as we resolve trivial conflicts.
1228 # actions as we resolve trivial conflicts.
1220 for f, (m, args, msg) in list(actions.items()):
1229 for f, (m, args, msg) in list(actions.items()):
1221 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1230 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1222 # local did change but ended up with same content
1231 # local did change but ended up with same content
1223 actions[f] = 'r', None, "prompt same"
1232 actions[f] = 'r', None, "prompt same"
1224 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1233 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1225 # remote did change but ended up with same content
1234 # remote did change but ended up with same content
1226 del actions[f] # don't get = keep local deleted
1235 del actions[f] # don't get = keep local deleted
1227
1236
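
A compact sketch of the idea behind _resolvetrivial, with file contents modelled as plain bytes in dicts rather than file contexts (illustrative only):

def resolve_trivial_sketch(actions, local, remote, ancestor):
    """Drop cd/dc prompts whose changed side matches the ancestor content."""
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and local.get(f) == ancestor[f]:
            actions[f] = ('r', None, "prompt same")  # local change was trivial
        elif m == 'dc' and f in ancestor and remote.get(f) == ancestor[f]:
            del actions[f]  # keep the local deletion

acts = {'a': ('cd', None, 'prompt changed/deleted'),
        'b': ('dc', None, 'prompt deleted/changed')}
resolve_trivial_sketch(acts,
                       local={'a': b'same'},
                       remote={'b': b'same'},
                       ancestor={'a': b'same', 'b': b'same'})
assert acts == {'a': ('r', None, 'prompt same')}
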
1228 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1237 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1229 acceptremote, followcopies, matcher=None,
1238 acceptremote, followcopies, matcher=None,
1230 mergeforce=False):
1239 mergeforce=False):
1231 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1240 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1232 # Avoid cycle.
1241 # Avoid cycle.
1233 from . import sparse
1242 from . import sparse
1234
1243
1235 if len(ancestors) == 1: # default
1244 if len(ancestors) == 1: # default
1236 actions, diverge, renamedelete = manifestmerge(
1245 actions, diverge, renamedelete = manifestmerge(
1237 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1246 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1238 acceptremote, followcopies)
1247 acceptremote, followcopies)
1239 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1248 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1240
1249
1241 else: # only when merge.preferancestor=* - the default
1250 else: # only when merge.preferancestor=* - the default
1242 repo.ui.note(
1251 repo.ui.note(
1243 _("note: merging %s and %s using bids from ancestors %s\n") %
1252 _("note: merging %s and %s using bids from ancestors %s\n") %
1244 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1253 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1245 for anc in ancestors)))
1254 for anc in ancestors)))
1246
1255
1247 # Call for bids
1256 # Call for bids
 1248 fbids = {} # mapping filename to bids (action method to list of actions)
 1257 fbids = {} # mapping filename to bids (action method to list of actions)
1249 diverge, renamedelete = None, None
1258 diverge, renamedelete = None, None
1250 for ancestor in ancestors:
1259 for ancestor in ancestors:
1251 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1260 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1252 actions, diverge1, renamedelete1 = manifestmerge(
1261 actions, diverge1, renamedelete1 = manifestmerge(
1253 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1262 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1254 acceptremote, followcopies, forcefulldiff=True)
1263 acceptremote, followcopies, forcefulldiff=True)
1255 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1264 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1256
1265
 1257 # Track the shortest set of warnings on the theory that bid
 1266 # Track the shortest set of warnings on the theory that bid
1258 # merge will correctly incorporate more information
1267 # merge will correctly incorporate more information
1259 if diverge is None or len(diverge1) < len(diverge):
1268 if diverge is None or len(diverge1) < len(diverge):
1260 diverge = diverge1
1269 diverge = diverge1
 1260 if renamedelete is None or len(renamedelete1) < len(renamedelete):
 1269 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1262 renamedelete = renamedelete1
1271 renamedelete = renamedelete1
1263
1272
1264 for f, a in sorted(actions.iteritems()):
1273 for f, a in sorted(actions.iteritems()):
1265 m, args, msg = a
1274 m, args, msg = a
1266 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1275 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1267 if f in fbids:
1276 if f in fbids:
1268 d = fbids[f]
1277 d = fbids[f]
1269 if m in d:
1278 if m in d:
1270 d[m].append(a)
1279 d[m].append(a)
1271 else:
1280 else:
1272 d[m] = [a]
1281 d[m] = [a]
1273 else:
1282 else:
1274 fbids[f] = {m: [a]}
1283 fbids[f] = {m: [a]}
1275
1284
1276 # Pick the best bid for each file
1285 # Pick the best bid for each file
1277 repo.ui.note(_('\nauction for merging merge bids\n'))
1286 repo.ui.note(_('\nauction for merging merge bids\n'))
1278 actions = {}
1287 actions = {}
1279 dms = [] # filenames that have dm actions
1288 dms = [] # filenames that have dm actions
1280 for f, bids in sorted(fbids.items()):
1289 for f, bids in sorted(fbids.items()):
 1281 # bids is a mapping from action method to list of actions
 1290 # bids is a mapping from action method to list of actions
1282 # Consensus?
1291 # Consensus?
1283 if len(bids) == 1: # all bids are the same kind of method
1292 if len(bids) == 1: # all bids are the same kind of method
1284 m, l = list(bids.items())[0]
1293 m, l = list(bids.items())[0]
1285 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1294 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1286 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1295 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1287 actions[f] = l[0]
1296 actions[f] = l[0]
1288 if m == 'dm':
1297 if m == 'dm':
1289 dms.append(f)
1298 dms.append(f)
1290 continue
1299 continue
1291 # If keep is an option, just do it.
1300 # If keep is an option, just do it.
1292 if 'k' in bids:
1301 if 'k' in bids:
1293 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1302 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1294 actions[f] = bids['k'][0]
1303 actions[f] = bids['k'][0]
1295 continue
1304 continue
1296 # If there are gets and they all agree [how could they not?], do it.
1305 # If there are gets and they all agree [how could they not?], do it.
1297 if 'g' in bids:
1306 if 'g' in bids:
1298 ga0 = bids['g'][0]
1307 ga0 = bids['g'][0]
1299 if all(a == ga0 for a in bids['g'][1:]):
1308 if all(a == ga0 for a in bids['g'][1:]):
1300 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1309 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1301 actions[f] = ga0
1310 actions[f] = ga0
1302 continue
1311 continue
1303 # TODO: Consider other simple actions such as mode changes
1312 # TODO: Consider other simple actions such as mode changes
1304 # Handle inefficient democrazy.
1313 # Handle inefficient democrazy.
1305 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1314 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1306 for m, l in sorted(bids.items()):
1315 for m, l in sorted(bids.items()):
1307 for _f, args, msg in l:
1316 for _f, args, msg in l:
1308 repo.ui.note(' %s -> %s\n' % (msg, m))
1317 repo.ui.note(' %s -> %s\n' % (msg, m))
1309 # Pick random action. TODO: Instead, prompt user when resolving
1318 # Pick random action. TODO: Instead, prompt user when resolving
1310 m, l = list(bids.items())[0]
1319 m, l = list(bids.items())[0]
1311 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1320 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1312 (f, m))
1321 (f, m))
1313 actions[f] = l[0]
1322 actions[f] = l[0]
1314 if m == 'dm':
1323 if m == 'dm':
1315 dms.append(f)
1324 dms.append(f)
1316 continue
1325 continue
1317 # Work around 'dm' that can cause multiple actions for the same file
1326 # Work around 'dm' that can cause multiple actions for the same file
1318 for f in dms:
1327 for f in dms:
1319 dm, (f0, flags), msg = actions[f]
1328 dm, (f0, flags), msg = actions[f]
1320 assert dm == 'dm', dm
1329 assert dm == 'dm', dm
1321 if f0 in actions and actions[f0][0] == 'r':
1330 if f0 in actions and actions[f0][0] == 'r':
1322 # We have one bid for removing a file and another for moving it.
1331 # We have one bid for removing a file and another for moving it.
1323 # These two could be merged as first move and then delete ...
1332 # These two could be merged as first move and then delete ...
1324 # but instead drop moving and just delete.
1333 # but instead drop moving and just delete.
1325 del actions[f]
1334 del actions[f]
1326 repo.ui.note(_('end of auction\n\n'))
1335 repo.ui.note(_('end of auction\n\n'))
1327
1336
1328 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1337 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1329
1338
1330 if wctx.rev() is None:
1339 if wctx.rev() is None:
1331 fractions = _forgetremoved(wctx, mctx, branchmerge)
1340 fractions = _forgetremoved(wctx, mctx, branchmerge)
1332 actions.update(fractions)
1341 actions.update(fractions)
1333
1342
1334 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1343 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1335 actions)
1344 actions)
1336
1345
1337 return prunedactions, diverge, renamedelete
1346 return prunedactions, diverge, renamedelete
1338
1347
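
The auction above boils down to a per-file preference order: a unanimous bid wins, then 'k' (keep), then a unanimous 'g' (get), and otherwise an arbitrary bid is taken. A standalone sketch of that ordering, assuming bids are already grouped by action code as in fbids (illustrative only):

def pick_bid_sketch(bids):
    """bids maps an action code to a list of (code, args, msg) bid tuples."""
    if len(bids) == 1:  # single action code: consensus if all bids are equal
        code, candidates = next(iter(bids.items()))
        if all(a == candidates[0] for a in candidates[1:]):
            return candidates[0]
    if 'k' in bids:  # keeping the local file is always safe
        return bids['k'][0]
    if 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
        return bids['g'][0]  # every ancestor proposes the same get
    return next(iter(bids.values()))[0]  # ambiguous: pick one arbitrarily

assert pick_bid_sketch({'k': [('k', (), 'remote unchanged')],
                        'g': [('g', ('', False), 'remote is newer')]}
                       ) == ('k', (), 'remote unchanged')
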
1339 def _getcwd():
1348 def _getcwd():
1340 try:
1349 try:
1341 return pycompat.getcwd()
1350 return pycompat.getcwd()
1342 except OSError as err:
1351 except OSError as err:
1343 if err.errno == errno.ENOENT:
1352 if err.errno == errno.ENOENT:
1344 return None
1353 return None
1345 raise
1354 raise
1346
1355
1347 def batchremove(repo, wctx, actions):
1356 def batchremove(repo, wctx, actions):
1348 """apply removes to the working directory
1357 """apply removes to the working directory
1349
1358
1350 yields tuples for progress updates
1359 yields tuples for progress updates
1351 """
1360 """
1352 verbose = repo.ui.verbose
1361 verbose = repo.ui.verbose
1353 cwd = _getcwd()
1362 cwd = _getcwd()
1354 i = 0
1363 i = 0
1355 for f, args, msg in actions:
1364 for f, args, msg in actions:
1356 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1365 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1357 if verbose:
1366 if verbose:
1358 repo.ui.note(_("removing %s\n") % f)
1367 repo.ui.note(_("removing %s\n") % f)
1359 wctx[f].audit()
1368 wctx[f].audit()
1360 try:
1369 try:
1361 wctx[f].remove(ignoremissing=True)
1370 wctx[f].remove(ignoremissing=True)
1362 except OSError as inst:
1371 except OSError as inst:
1363 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1372 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1364 (f, inst.strerror))
1373 (f, inst.strerror))
1365 if i == 100:
1374 if i == 100:
1366 yield i, f
1375 yield i, f
1367 i = 0
1376 i = 0
1368 i += 1
1377 i += 1
1369 if i > 0:
1378 if i > 0:
1370 yield i, f
1379 yield i, f
1371
1380
1372 if cwd and not _getcwd():
1381 if cwd and not _getcwd():
1373 # cwd was removed in the course of removing files; print a helpful
1382 # cwd was removed in the course of removing files; print a helpful
1374 # warning.
1383 # warning.
1375 repo.ui.warn(_("current directory was removed\n"
1384 repo.ui.warn(_("current directory was removed\n"
1376 "(consider changing to repo root: %s)\n") % repo.root)
1385 "(consider changing to repo root: %s)\n") % repo.root)
1377
1386
1378 def batchget(repo, mctx, wctx, actions):
1387 def batchget(repo, mctx, wctx, actions):
1379 """apply gets to the working directory
1388 """apply gets to the working directory
1380
1389
1381 mctx is the context to get from
1390 mctx is the context to get from
1382
1391
1383 yields tuples for progress updates
1392 yields tuples for progress updates
1384 """
1393 """
1385 verbose = repo.ui.verbose
1394 verbose = repo.ui.verbose
1386 fctx = mctx.filectx
1395 fctx = mctx.filectx
1387 ui = repo.ui
1396 ui = repo.ui
1388 i = 0
1397 i = 0
1389 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1398 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1390 for f, (flags, backup), msg in actions:
1399 for f, (flags, backup), msg in actions:
1391 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1400 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1392 if verbose:
1401 if verbose:
1393 repo.ui.note(_("getting %s\n") % f)
1402 repo.ui.note(_("getting %s\n") % f)
1394
1403
1395 if backup:
1404 if backup:
1396 # If a file or directory exists with the same name, back that
1405 # If a file or directory exists with the same name, back that
1397 # up. Otherwise, look to see if there is a file that conflicts
1406 # up. Otherwise, look to see if there is a file that conflicts
1398 # with a directory this file is in, and if so, back that up.
1407 # with a directory this file is in, and if so, back that up.
1399 absf = repo.wjoin(f)
1408 absf = repo.wjoin(f)
1400 if not repo.wvfs.lexists(f):
1409 if not repo.wvfs.lexists(f):
1401 for p in util.finddirs(f):
1410 for p in util.finddirs(f):
1402 if repo.wvfs.isfileorlink(p):
1411 if repo.wvfs.isfileorlink(p):
1403 absf = repo.wjoin(p)
1412 absf = repo.wjoin(p)
1404 break
1413 break
1405 orig = scmutil.origpath(ui, repo, absf)
1414 orig = scmutil.origpath(ui, repo, absf)
1406 if repo.wvfs.lexists(absf):
1415 if repo.wvfs.lexists(absf):
1407 util.rename(absf, orig)
1416 util.rename(absf, orig)
1408 wctx[f].clearunknown()
1417 wctx[f].clearunknown()
1409 atomictemp = ui.configbool("experimental", "update.atomic-file")
1418 atomictemp = ui.configbool("experimental", "update.atomic-file")
1410 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1419 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1411 atomictemp=atomictemp)
1420 atomictemp=atomictemp)
1412 if i == 100:
1421 if i == 100:
1413 yield i, f
1422 yield i, f
1414 i = 0
1423 i = 0
1415 i += 1
1424 i += 1
1416 if i > 0:
1425 if i > 0:
1417 yield i, f
1426 yield i, f
1418
1427
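
Both batchremove and batchget report progress in chunks of roughly 100 files rather than once per file. The counting pattern, isolated from the repo machinery (illustrative only):

def batched_progress(items, chunk=100):
    """Yield (count, last_item) roughly every `chunk` processed items."""
    i = 0
    last = None
    for last in items:
        # ... per-item work would happen here ...
        if i == chunk:
            yield i, last
            i = 0
        i += 1
    if i > 0:
        yield i, last

# 250 items produce updates of 100, 100 and 50; the counts always sum to the
# number of items processed, which is what the progress bar relies on.
assert [n for n, _ in batched_progress(range(250))] == [100, 100, 50]
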
1419 def _prefetchfiles(repo, ctx, actions):
1428 def _prefetchfiles(repo, ctx, actions):
1420 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1429 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1421 of merge actions. ``ctx`` is the context being merged in."""
1430 of merge actions. ``ctx`` is the context being merged in."""
1422
1431
1423 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1432 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1424 # don't touch the context to be merged in. 'cd' is skipped, because
1433 # don't touch the context to be merged in. 'cd' is skipped, because
1425 # changed/deleted never resolves to something from the remote side.
1434 # changed/deleted never resolves to something from the remote side.
1426 oplist = [actions[a] for a in 'g dc dg m'.split()]
1435 oplist = [actions[a] for a in 'g dc dg m'.split()]
1427 prefetch = scmutil.fileprefetchhooks
1436 prefetch = scmutil.fileprefetchhooks
1428 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1437 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1429
1438
1430 @attr.s(frozen=True)
1439 @attr.s(frozen=True)
1431 class updateresult(object):
1440 class updateresult(object):
1432 updatedcount = attr.ib()
1441 updatedcount = attr.ib()
1433 mergedcount = attr.ib()
1442 mergedcount = attr.ib()
1434 removedcount = attr.ib()
1443 removedcount = attr.ib()
1435 unresolvedcount = attr.ib()
1444 unresolvedcount = attr.ib()
1436
1445
1437 # TODO remove container emulation once consumers switch to new API.
1446 # TODO remove container emulation once consumers switch to new API.
1438
1447
1439 def __getitem__(self, x):
1448 def __getitem__(self, x):
1440 if x == 0:
1449 if x == 0:
1441 return self.updatedcount
1450 return self.updatedcount
1442 elif x == 1:
1451 elif x == 1:
1443 return self.mergedcount
1452 return self.mergedcount
1444 elif x == 2:
1453 elif x == 2:
1445 return self.removedcount
1454 return self.removedcount
1446 elif x == 3:
1455 elif x == 3:
1447 return self.unresolvedcount
1456 return self.unresolvedcount
1448 else:
1457 else:
1449 raise IndexError('can only access items 0-3')
1458 raise IndexError('can only access items 0-3')
1450
1459
1451 def __len__(self):
1460 def __len__(self):
1452 return 4
1461 return 4
1453
1462
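
Because updateresult emulates a four-item container, legacy callers that treated the return value of applyupdates as a tuple keep working. A tiny illustration, assuming the updateresult class above is in scope and using made-up counts:

result = updateresult(updatedcount=3, mergedcount=1, removedcount=2,
                      unresolvedcount=0)

# Old-style tuple unpacking still works via the __getitem__ sequence protocol.
updated, merged, removed, unresolved = result
assert (updated, merged, removed, unresolved) == (3, 1, 2, 0)
assert len(result) == 4 and result[3] == 0
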
1454 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1463 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1455 """apply the merge action list to the working directory
1464 """apply the merge action list to the working directory
1456
1465
1457 wctx is the working copy context
1466 wctx is the working copy context
1458 mctx is the context to be merged into the working copy
1467 mctx is the context to be merged into the working copy
1459
1468
 1460 Return an updateresult object with counts (updated, merged, removed,
 1469 Return an updateresult object with counts (updated, merged, removed,
 1461 unresolved) describing how many files were affected by the update.
 1470 unresolved) describing how many files were affected by the update.
1462 """
1471 """
1463
1472
1464 _prefetchfiles(repo, mctx, actions)
1473 _prefetchfiles(repo, mctx, actions)
1465
1474
1466 updated, merged, removed = 0, 0, 0
1475 updated, merged, removed = 0, 0, 0
1467 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1476 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1468 moves = []
1477 moves = []
1469 for m, l in actions.items():
1478 for m, l in actions.items():
1470 l.sort()
1479 l.sort()
1471
1480
1472 # 'cd' and 'dc' actions are treated like other merge conflicts
1481 # 'cd' and 'dc' actions are treated like other merge conflicts
1473 mergeactions = sorted(actions['cd'])
1482 mergeactions = sorted(actions['cd'])
1474 mergeactions.extend(sorted(actions['dc']))
1483 mergeactions.extend(sorted(actions['dc']))
1475 mergeactions.extend(actions['m'])
1484 mergeactions.extend(actions['m'])
1476 for f, args, msg in mergeactions:
1485 for f, args, msg in mergeactions:
1477 f1, f2, fa, move, anc = args
1486 f1, f2, fa, move, anc = args
1478 if f == '.hgsubstate': # merged internally
1487 if f == '.hgsubstate': # merged internally
1479 continue
1488 continue
1480 if f1 is None:
1489 if f1 is None:
1481 fcl = filemerge.absentfilectx(wctx, fa)
1490 fcl = filemerge.absentfilectx(wctx, fa)
1482 else:
1491 else:
1483 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1492 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1484 fcl = wctx[f1]
1493 fcl = wctx[f1]
1485 if f2 is None:
1494 if f2 is None:
1486 fco = filemerge.absentfilectx(mctx, fa)
1495 fco = filemerge.absentfilectx(mctx, fa)
1487 else:
1496 else:
1488 fco = mctx[f2]
1497 fco = mctx[f2]
1489 actx = repo[anc]
1498 actx = repo[anc]
1490 if fa in actx:
1499 if fa in actx:
1491 fca = actx[fa]
1500 fca = actx[fa]
1492 else:
1501 else:
1493 # TODO: move to absentfilectx
1502 # TODO: move to absentfilectx
1494 fca = repo.filectx(f1, fileid=nullrev)
1503 fca = repo.filectx(f1, fileid=nullrev)
1495 ms.add(fcl, fco, fca, f)
1504 ms.add(fcl, fco, fca, f)
1496 if f1 != f and move:
1505 if f1 != f and move:
1497 moves.append(f1)
1506 moves.append(f1)
1498
1507
1499 _updating = _('updating')
1508 _updating = _('updating')
1500 _files = _('files')
1509 _files = _('files')
1501 progress = repo.ui.progress
1510 progress = repo.ui.progress
1502
1511
1503 # remove renamed files after safely stored
1512 # remove renamed files after safely stored
1504 for f in moves:
1513 for f in moves:
1505 if wctx[f].lexists():
1514 if wctx[f].lexists():
1506 repo.ui.debug("removing %s\n" % f)
1515 repo.ui.debug("removing %s\n" % f)
1507 wctx[f].audit()
1516 wctx[f].audit()
1508 wctx[f].remove()
1517 wctx[f].remove()
1509
1518
1510 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1519 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1511 z = 0
1520 z = 0
1512
1521
1513 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1522 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1514 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1523 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1515
1524
1516 # record path conflicts
1525 # record path conflicts
1517 for f, args, msg in actions['p']:
1526 for f, args, msg in actions['p']:
1518 f1, fo = args
1527 f1, fo = args
1519 s = repo.ui.status
1528 s = repo.ui.status
1520 s(_("%s: path conflict - a file or link has the same name as a "
1529 s(_("%s: path conflict - a file or link has the same name as a "
1521 "directory\n") % f)
1530 "directory\n") % f)
1522 if fo == 'l':
1531 if fo == 'l':
1523 s(_("the local file has been renamed to %s\n") % f1)
1532 s(_("the local file has been renamed to %s\n") % f1)
1524 else:
1533 else:
1525 s(_("the remote file has been renamed to %s\n") % f1)
1534 s(_("the remote file has been renamed to %s\n") % f1)
1526 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1535 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1527 ms.addpath(f, f1, fo)
1536 ms.addpath(f, f1, fo)
1528 z += 1
1537 z += 1
1529 progress(_updating, z, item=f, total=numupdates, unit=_files)
1538 progress(_updating, z, item=f, total=numupdates, unit=_files)
1530
1539
1531 # When merging in-memory, we can't support worker processes, so set the
1540 # When merging in-memory, we can't support worker processes, so set the
1532 # per-item cost at 0 in that case.
1541 # per-item cost at 0 in that case.
1533 cost = 0 if wctx.isinmemory() else 0.001
1542 cost = 0 if wctx.isinmemory() else 0.001
1534
1543
1535 # remove in parallel (must come before resolving path conflicts and getting)
1544 # remove in parallel (must come before resolving path conflicts and getting)
1536 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1545 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1537 actions['r'])
1546 actions['r'])
1538 for i, item in prog:
1547 for i, item in prog:
1539 z += i
1548 z += i
1540 progress(_updating, z, item=item, total=numupdates, unit=_files)
1549 progress(_updating, z, item=item, total=numupdates, unit=_files)
1541 removed = len(actions['r'])
1550 removed = len(actions['r'])
1542
1551
1543 # resolve path conflicts (must come before getting)
1552 # resolve path conflicts (must come before getting)
1544 for f, args, msg in actions['pr']:
1553 for f, args, msg in actions['pr']:
1545 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1554 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1546 f0, = args
1555 f0, = args
1547 if wctx[f0].lexists():
1556 if wctx[f0].lexists():
1548 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1557 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1549 wctx[f].audit()
1558 wctx[f].audit()
1550 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1559 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1551 wctx[f0].remove()
1560 wctx[f0].remove()
1552 z += 1
1561 z += 1
1553 progress(_updating, z, item=f, total=numupdates, unit=_files)
1562 progress(_updating, z, item=f, total=numupdates, unit=_files)
1554
1563
1555 # get in parallel
1564 # get in parallel
1556 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1565 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1557 actions['g'])
1566 actions['g'])
1558 for i, item in prog:
1567 for i, item in prog:
1559 z += i
1568 z += i
1560 progress(_updating, z, item=item, total=numupdates, unit=_files)
1569 progress(_updating, z, item=item, total=numupdates, unit=_files)
1561 updated = len(actions['g'])
1570 updated = len(actions['g'])
1562
1571
1563 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1572 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1564 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1573 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1565
1574
1566 # forget (manifest only, just log it) (must come first)
1575 # forget (manifest only, just log it) (must come first)
1567 for f, args, msg in actions['f']:
1576 for f, args, msg in actions['f']:
1568 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1577 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1569 z += 1
1578 z += 1
1570 progress(_updating, z, item=f, total=numupdates, unit=_files)
1579 progress(_updating, z, item=f, total=numupdates, unit=_files)
1571
1580
1572 # re-add (manifest only, just log it)
1581 # re-add (manifest only, just log it)
1573 for f, args, msg in actions['a']:
1582 for f, args, msg in actions['a']:
1574 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1583 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1575 z += 1
1584 z += 1
1576 progress(_updating, z, item=f, total=numupdates, unit=_files)
1585 progress(_updating, z, item=f, total=numupdates, unit=_files)
1577
1586
1578 # re-add/mark as modified (manifest only, just log it)
1587 # re-add/mark as modified (manifest only, just log it)
1579 for f, args, msg in actions['am']:
1588 for f, args, msg in actions['am']:
1580 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1589 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1581 z += 1
1590 z += 1
1582 progress(_updating, z, item=f, total=numupdates, unit=_files)
1591 progress(_updating, z, item=f, total=numupdates, unit=_files)
1583
1592
1584 # keep (noop, just log it)
1593 # keep (noop, just log it)
1585 for f, args, msg in actions['k']:
1594 for f, args, msg in actions['k']:
1586 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1595 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1587 # no progress
1596 # no progress
1588
1597
1589 # directory rename, move local
1598 # directory rename, move local
1590 for f, args, msg in actions['dm']:
1599 for f, args, msg in actions['dm']:
1591 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1600 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1592 z += 1
1601 z += 1
1593 progress(_updating, z, item=f, total=numupdates, unit=_files)
1602 progress(_updating, z, item=f, total=numupdates, unit=_files)
1594 f0, flags = args
1603 f0, flags = args
1595 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1604 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1596 wctx[f].audit()
1605 wctx[f].audit()
1597 wctx[f].write(wctx.filectx(f0).data(), flags)
1606 wctx[f].write(wctx.filectx(f0).data(), flags)
1598 wctx[f0].remove()
1607 wctx[f0].remove()
1599 updated += 1
1608 updated += 1
1600
1609
1601 # local directory rename, get
1610 # local directory rename, get
1602 for f, args, msg in actions['dg']:
1611 for f, args, msg in actions['dg']:
1603 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1612 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1604 z += 1
1613 z += 1
1605 progress(_updating, z, item=f, total=numupdates, unit=_files)
1614 progress(_updating, z, item=f, total=numupdates, unit=_files)
1606 f0, flags = args
1615 f0, flags = args
1607 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1616 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1608 wctx[f].write(mctx.filectx(f0).data(), flags)
1617 wctx[f].write(mctx.filectx(f0).data(), flags)
1609 updated += 1
1618 updated += 1
1610
1619
1611 # exec
1620 # exec
1612 for f, args, msg in actions['e']:
1621 for f, args, msg in actions['e']:
1613 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1622 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1614 z += 1
1623 z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError("in-memory merge does not "
                                                    "support mergedriver")
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(updated, merged, removed,
                                max(len(unresolvedf), 1))
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            if f == '.hgsubstate': # subrepo states need updating
                subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                     overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (usemergedriver and not unresolved
        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)
    return updateresult(updated, merged, removed, unresolved)

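# Illustrative note (a hypothetical example, not code from this changeset):
# the ``actions`` mapping consumed by recordupdates() below is a dict keyed
# by action code ('g', 'r', 'm', 'e', ...), each value a list of
# (filename, args, message) tuples, roughly of the shape
#
#     {'g': [('foo.txt', ('', False), 'remote created')],
#      'r': [('bar.txt', None, 'other deleted')]}
#
# where the filenames and messages here are made up for illustration.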
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

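# Illustrative pipeline (a sketch of how update() below ties these helpers
# together, not additional behavior): calculateupdates() computes the per-file
# actions, applyupdates() applies them to the working context, and
# recordupdates() then records the outcome in the dirstate, roughly:
#
#     actionbyfile, diverge, renamedelete = calculateupdates(...)
#     stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
#     recordupdates(repo, actions, branchmerge)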
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *    |    (1)
     y   *   y   *    *     *     *    |    (1)
     y   *   *   y    *     *     *    |    (1)
     *   y   y   *    *     *     *    |    (1)
     *   y   *   y    *     *     *    |    (1)
     *   *   y   y    *     *     *    |    (1)
     *   *   *   *    *     n     n    |     x
     *   *   *   *    n     *     *    |    ok
     n   n   n   n    y     *     y    |   merge
     n   n   n   n    y     y     n    |    (2)
     n   n   n   y    y     *     *    |   merge
     n   n   y   n    y     *     *    |   merge if no conflict
     n   y   n   n    y     *     *    |   discard
     y   n   n   n    y     *     *    |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return updateresult(0, 0, 0, 0)

            if (updatecheck == 'linear' and
                pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # The branching here is a bit convoluted so that we make
                    # the minimal number of calls to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial and not wc.isinmemory():
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial and not wc.isinmemory():
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary includes
        # (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

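# Illustrative (hypothetical) caller of update() above: a plain, non-merging
# checkout of ``node`` could be requested as
#
#     stats = update(repo, node, branchmerge=False, force=False)
#
# which, with no updatecheck given, falls back to the 'linear' behavior
# described in the table in the docstring.
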
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
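
# Illustrative (hypothetical) use of graft() above: grafting ``ctx`` onto the
# working directory parent with the usual labels would look roughly like
#
#     stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])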