merge: use constants for merge driver state...
Gregory Szorc
r37128:1b158ca3 default
@@ -1,2133 +1,2140 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .thirdparty import (
25 from .thirdparty import (
26 attr,
26 attr,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 subrepoutil,
36 subrepoutil,
37 util,
37 util,
38 worker,
38 worker,
39 )
39 )
40
40
41 _pack = struct.pack
41 _pack = struct.pack
42 _unpack = struct.unpack
42 _unpack = struct.unpack
43
43
44 def _droponode(data):
44 def _droponode(data):
45 # used for compatibility for v1
45 # used for compatibility for v1
46 bits = data.split('\0')
46 bits = data.split('\0')
47 bits = bits[:-2] + bits[-1:]
47 bits = bits[:-2] + bits[-1:]
48 return '\0'.join(bits)
48 return '\0'.join(bits)
49
49
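As a standalone illustration of what `_droponode` does (all field values below are invented; real records are produced by `mergestate.add()` further down): a v2 merged-file record carries an extra "other file node" field as its second-to-last component, and dropping that component yields the v1-compatible payload.

# Hypothetical v2 "F" record payload: NUL-separated fields in the order
# filename, state, local hash, local path, ancestor path, ancestor node,
# other path, other node, flags.  v1 has no "other node" field.
v2data = b'\0'.join([b'foo.txt', b'u', b'abc123', b'foo.txt', b'foo.txt',
                     b'ancnode', b'foo.txt', b'othernode', b''])
bits = v2data.split(b'\0')
v1data = b'\0'.join(bits[:-2] + bits[-1:])      # what _droponode() returns
assert b'othernode' not in v1data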
50 # Merge state record types. See ``mergestate`` docs for more.
50 # Merge state record types. See ``mergestate`` docs for more.
51 RECORD_LOCAL = b'L'
51 RECORD_LOCAL = b'L'
52 RECORD_OTHER = b'O'
52 RECORD_OTHER = b'O'
53 RECORD_MERGED = b'F'
53 RECORD_MERGED = b'F'
54 RECORD_CHANGEDELETE_CONFLICT = b'C'
54 RECORD_CHANGEDELETE_CONFLICT = b'C'
55 RECORD_MERGE_DRIVER_MERGE = b'D'
55 RECORD_MERGE_DRIVER_MERGE = b'D'
56 RECORD_PATH_CONFLICT = b'P'
56 RECORD_PATH_CONFLICT = b'P'
57 RECORD_MERGE_DRIVER_STATE = b'm'
57 RECORD_MERGE_DRIVER_STATE = b'm'
58 RECORD_FILE_VALUES = b'f'
58 RECORD_FILE_VALUES = b'f'
59 RECORD_LABELS = b'l'
59 RECORD_LABELS = b'l'
60 RECORD_OVERRIDE = b't'
60 RECORD_OVERRIDE = b't'
61 RECORD_UNSUPPORTED_MANDATORY = b'X'
61 RECORD_UNSUPPORTED_MANDATORY = b'X'
62 RECORD_UNSUPPORTED_ADVISORY = b'x'
62 RECORD_UNSUPPORTED_ADVISORY = b'x'
63
63
64 MERGE_DRIVER_STATE_UNMARKED = b'u'
65 MERGE_DRIVER_STATE_MARKED = b'm'
66 MERGE_DRIVER_STATE_SUCCESS = b's'
67
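A minimal sketch of how these new constants are meant to be used instead of bare 'u'/'m'/'s' characters; both `MERGE_DRIVER_STATES` and `_normalizemdstate` are invented names for illustration and do not exist in this module.

# Assumes the three MERGE_DRIVER_STATE_* constants defined just above.
MERGE_DRIVER_STATES = (MERGE_DRIVER_STATE_UNMARKED,
                       MERGE_DRIVER_STATE_MARKED,
                       MERGE_DRIVER_STATE_SUCCESS)

def _normalizemdstate(mdstate):
    # An unknown or malformed state falls back to "unmarked": the merge
    # driver is expected to be idempotent, so it is simply rerun.
    if len(mdstate) != 1 or mdstate not in MERGE_DRIVER_STATES:
        return MERGE_DRIVER_STATE_UNMARKED
    return mdstate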
64 class mergestate(object):
68 class mergestate(object):
65 '''track 3-way merge state of individual files
69 '''track 3-way merge state of individual files
66
70
67 The merge state is stored on disk when needed. Two files are used: one with
71 The merge state is stored on disk when needed. Two files are used: one with
68 an old format (version 1), and one with a new format (version 2). Version 2
72 an old format (version 1), and one with a new format (version 2). Version 2
69 stores a superset of the data in version 1, including new kinds of records
73 stores a superset of the data in version 1, including new kinds of records
70 in the future. For more about the new format, see the documentation for
74 in the future. For more about the new format, see the documentation for
71 `_readrecordsv2`.
75 `_readrecordsv2`.
72
76
73 Each record can contain arbitrary content, and has an associated type. This
77 Each record can contain arbitrary content, and has an associated type. This
74 `type` should be a letter. If `type` is uppercase, the record is mandatory:
78 `type` should be a letter. If `type` is uppercase, the record is mandatory:
75 versions of Mercurial that don't support it should abort. If `type` is
79 versions of Mercurial that don't support it should abort. If `type` is
76 lowercase, the record can be safely ignored.
80 lowercase, the record can be safely ignored.
77
81
78 Currently known records:
82 Currently known records:
79
83
80 L: the node of the "local" part of the merge (hexified version)
84 L: the node of the "local" part of the merge (hexified version)
81 O: the node of the "other" part of the merge (hexified version)
85 O: the node of the "other" part of the merge (hexified version)
82 F: a file to be merged entry
86 F: a file to be merged entry
83 C: a change/delete or delete/change conflict
87 C: a change/delete or delete/change conflict
84 D: a file that the external merge driver will merge internally
88 D: a file that the external merge driver will merge internally
85 (experimental)
89 (experimental)
86 P: a path conflict (file vs directory)
90 P: a path conflict (file vs directory)
87 m: the external merge driver defined for this merge plus its run state
91 m: the external merge driver defined for this merge plus its run state
88 (experimental)
92 (experimental)
89 f: a (filename, dictionary) tuple of optional values for a given file
93 f: a (filename, dictionary) tuple of optional values for a given file
90 X: unsupported mandatory record type (used in tests)
94 X: unsupported mandatory record type (used in tests)
91 x: unsupported advisory record type (used in tests)
95 x: unsupported advisory record type (used in tests)
92 l: the labels for the parts of the merge.
96 l: the labels for the parts of the merge.
93
97
94 Merge driver run states (experimental):
98 Merge driver run states (experimental):
95 u: driver-resolved files unmarked -- needs to be run next time we're about
99 u: driver-resolved files unmarked -- needs to be run next time we're about
96 to resolve or commit
100 to resolve or commit
97 m: driver-resolved files marked -- only needs to be run before commit
101 m: driver-resolved files marked -- only needs to be run before commit
98 s: success/skipped -- does not need to be run any more
102 s: success/skipped -- does not need to be run any more
99
103
100 Merge record states (stored in self._state, indexed by filename):
104 Merge record states (stored in self._state, indexed by filename):
101 u: unresolved conflict
105 u: unresolved conflict
102 r: resolved conflict
106 r: resolved conflict
103 pu: unresolved path conflict (file conflicts with directory)
107 pu: unresolved path conflict (file conflicts with directory)
104 pr: resolved path conflict
108 pr: resolved path conflict
105 d: driver-resolved conflict
109 d: driver-resolved conflict
106
110
107 The resolve command transitions between 'u' and 'r' for conflicts and
111 The resolve command transitions between 'u' and 'r' for conflicts and
108 'pu' and 'pr' for path conflicts.
112 'pu' and 'pr' for path conflicts.
109 '''
113 '''
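For concreteness, a made-up example of what `self._state` can hold after `_read()`; the filenames, hashes, and nodes are invented, and the leading element of each value is one of the record states documented in the docstring above.

example_state = {
    b'foo.c': [b'u',              # unresolved regular merge ('F' record)
               b'<local hash>', b'foo.c', b'foo.c', b'<ancestor node>',
               b'foo.c', b'<other node>', b''],
    b'dir/f': [b'pu',             # unresolved path conflict ('P' record)
               b'dir/f~other-branch', b'r'],
}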
110 statepathv1 = 'merge/state'
114 statepathv1 = 'merge/state'
111 statepathv2 = 'merge/state2'
115 statepathv2 = 'merge/state2'
112
116
113 @staticmethod
117 @staticmethod
114 def clean(repo, node=None, other=None, labels=None):
118 def clean(repo, node=None, other=None, labels=None):
115 """Initialize a brand new merge state, removing any existing state on
119 """Initialize a brand new merge state, removing any existing state on
116 disk."""
120 disk."""
117 ms = mergestate(repo)
121 ms = mergestate(repo)
118 ms.reset(node, other, labels)
122 ms.reset(node, other, labels)
119 return ms
123 return ms
120
124
121 @staticmethod
125 @staticmethod
122 def read(repo):
126 def read(repo):
123 """Initialize the merge state, reading it from disk."""
127 """Initialize the merge state, reading it from disk."""
124 ms = mergestate(repo)
128 ms = mergestate(repo)
125 ms._read()
129 ms._read()
126 return ms
130 return ms
127
131
128 def __init__(self, repo):
132 def __init__(self, repo):
129 """Initialize the merge state.
133 """Initialize the merge state.
130
134
131 Do not use this directly! Instead call read() or clean()."""
135 Do not use this directly! Instead call read() or clean()."""
132 self._repo = repo
136 self._repo = repo
133 self._dirty = False
137 self._dirty = False
134 self._labels = None
138 self._labels = None
135
139
136 def reset(self, node=None, other=None, labels=None):
140 def reset(self, node=None, other=None, labels=None):
137 self._state = {}
141 self._state = {}
138 self._stateextras = {}
142 self._stateextras = {}
139 self._local = None
143 self._local = None
140 self._other = None
144 self._other = None
141 self._labels = labels
145 self._labels = labels
142 for var in ('localctx', 'otherctx'):
146 for var in ('localctx', 'otherctx'):
143 if var in vars(self):
147 if var in vars(self):
144 delattr(self, var)
148 delattr(self, var)
145 if node:
149 if node:
146 self._local = node
150 self._local = node
147 self._other = other
151 self._other = other
148 self._readmergedriver = None
152 self._readmergedriver = None
149 if self.mergedriver:
153 if self.mergedriver:
150 self._mdstate = 's'
154 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
151 else:
155 else:
152 self._mdstate = 'u'
156 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
153 shutil.rmtree(self._repo.vfs.join('merge'), True)
157 shutil.rmtree(self._repo.vfs.join('merge'), True)
154 self._results = {}
158 self._results = {}
155 self._dirty = False
159 self._dirty = False
156
160
157 def _read(self):
161 def _read(self):
158 """Analyse each record content to restore a serialized state from disk
162 """Analyse each record content to restore a serialized state from disk
159
163
160 This function processes "record" entries produced by the de-serialization
164 This function processes "record" entries produced by the de-serialization
161 of the on-disk file.
165 of the on-disk file.
162 """
166 """
163 self._state = {}
167 self._state = {}
164 self._stateextras = {}
168 self._stateextras = {}
165 self._local = None
169 self._local = None
166 self._other = None
170 self._other = None
167 for var in ('localctx', 'otherctx'):
171 for var in ('localctx', 'otherctx'):
168 if var in vars(self):
172 if var in vars(self):
169 delattr(self, var)
173 delattr(self, var)
170 self._readmergedriver = None
174 self._readmergedriver = None
171 self._mdstate = 's'
175 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
172 unsupported = set()
176 unsupported = set()
173 records = self._readrecords()
177 records = self._readrecords()
174 for rtype, record in records:
178 for rtype, record in records:
175 if rtype == RECORD_LOCAL:
179 if rtype == RECORD_LOCAL:
176 self._local = bin(record)
180 self._local = bin(record)
177 elif rtype == RECORD_OTHER:
181 elif rtype == RECORD_OTHER:
178 self._other = bin(record)
182 self._other = bin(record)
179 elif rtype == RECORD_MERGE_DRIVER_STATE:
183 elif rtype == RECORD_MERGE_DRIVER_STATE:
180 bits = record.split('\0', 1)
184 bits = record.split('\0', 1)
181 mdstate = bits[1]
185 mdstate = bits[1]
182 if len(mdstate) != 1 or mdstate not in 'ums':
186 if len(mdstate) != 1 or mdstate not in (
187 MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
188 MERGE_DRIVER_STATE_SUCCESS):
183 # the merge driver should be idempotent, so just rerun it
189 # the merge driver should be idempotent, so just rerun it
184 mdstate = 'u'
190 mdstate = MERGE_DRIVER_STATE_UNMARKED
185
191
186 self._readmergedriver = bits[0]
192 self._readmergedriver = bits[0]
187 self._mdstate = mdstate
193 self._mdstate = mdstate
188 elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
194 elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
189 RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
195 RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
190 bits = record.split('\0')
196 bits = record.split('\0')
191 self._state[bits[0]] = bits[1:]
197 self._state[bits[0]] = bits[1:]
192 elif rtype == RECORD_FILE_VALUES:
198 elif rtype == RECORD_FILE_VALUES:
193 filename, rawextras = record.split('\0', 1)
199 filename, rawextras = record.split('\0', 1)
194 extraparts = rawextras.split('\0')
200 extraparts = rawextras.split('\0')
195 extras = {}
201 extras = {}
196 i = 0
202 i = 0
197 while i < len(extraparts):
203 while i < len(extraparts):
198 extras[extraparts[i]] = extraparts[i + 1]
204 extras[extraparts[i]] = extraparts[i + 1]
199 i += 2
205 i += 2
200
206
201 self._stateextras[filename] = extras
207 self._stateextras[filename] = extras
202 elif rtype == RECORD_LABELS:
208 elif rtype == RECORD_LABELS:
203 labels = record.split('\0', 2)
209 labels = record.split('\0', 2)
204 self._labels = [l for l in labels if len(l) > 0]
210 self._labels = [l for l in labels if len(l) > 0]
205 elif not rtype.islower():
211 elif not rtype.islower():
206 unsupported.add(rtype)
212 unsupported.add(rtype)
207 self._results = {}
213 self._results = {}
208 self._dirty = False
214 self._dirty = False
209
215
210 if unsupported:
216 if unsupported:
211 raise error.UnsupportedMergeRecords(unsupported)
217 raise error.UnsupportedMergeRecords(unsupported)
212
218
213 def _readrecords(self):
219 def _readrecords(self):
214 """Read merge state from disk and return a list of record (TYPE, data)
220 """Read merge state from disk and return a list of record (TYPE, data)
215
221
216 We read data from both v1 and v2 files and decide which one to use.
222 We read data from both v1 and v2 files and decide which one to use.
217
223
218 V1 has been used by versions prior to 2.9.1 and contains less data than
224 V1 has been used by versions prior to 2.9.1 and contains less data than
219 v2. We read both versions and check whether any data in v2 contradicts
225 v2. We read both versions and check whether any data in v2 contradicts
220 v1. If there is no contradiction we can safely assume that both v1
226 v1. If there is no contradiction we can safely assume that both v1
221 and v2 were written at the same time and use the extra data in v2. If
227 and v2 were written at the same time and use the extra data in v2. If
222 there is a contradiction we ignore the v2 content, as we assume an old
228 there is a contradiction we ignore the v2 content, as we assume an old
223 version of Mercurial has overwritten the mergestate file and left an old
229 version of Mercurial has overwritten the mergestate file and left an old
224 v2 file around.
230 v2 file around.
225
231
226 returns list of record [(TYPE, data), ...]"""
232 returns list of record [(TYPE, data), ...]"""
227 v1records = self._readrecordsv1()
233 v1records = self._readrecordsv1()
228 v2records = self._readrecordsv2()
234 v2records = self._readrecordsv2()
229 if self._v1v2match(v1records, v2records):
235 if self._v1v2match(v1records, v2records):
230 return v2records
236 return v2records
231 else:
237 else:
232 # v1 file is newer than v2 file, use it
238 # v1 file is newer than v2 file, use it
233 # we have to infer the "other" changeset of the merge
239 # we have to infer the "other" changeset of the merge
234 # we cannot do better than that with v1 of the format
240 # we cannot do better than that with v1 of the format
235 mctx = self._repo[None].parents()[-1]
241 mctx = self._repo[None].parents()[-1]
236 v1records.append((RECORD_OTHER, mctx.hex()))
242 v1records.append((RECORD_OTHER, mctx.hex()))
237 # add placeholder "other" file node information
243 # add placeholder "other" file node information
238 # nobody is using it yet, so we do not need to fetch the data
244 # nobody is using it yet, so we do not need to fetch the data
239 # if mctx was wrong, `mctx[bits[-2]]` may fail.
245 # if mctx was wrong, `mctx[bits[-2]]` may fail.
240 for idx, r in enumerate(v1records):
246 for idx, r in enumerate(v1records):
241 if r[0] == RECORD_MERGED:
247 if r[0] == RECORD_MERGED:
242 bits = r[1].split('\0')
248 bits = r[1].split('\0')
243 bits.insert(-2, '')
249 bits.insert(-2, '')
244 v1records[idx] = (r[0], '\0'.join(bits))
250 v1records[idx] = (r[0], '\0'.join(bits))
245 return v1records
251 return v1records
246
252
247 def _v1v2match(self, v1records, v2records):
253 def _v1v2match(self, v1records, v2records):
248 oldv2 = set() # old format version of v2 record
254 oldv2 = set() # old format version of v2 record
249 for rec in v2records:
255 for rec in v2records:
250 if rec[0] == RECORD_LOCAL:
256 if rec[0] == RECORD_LOCAL:
251 oldv2.add(rec)
257 oldv2.add(rec)
252 elif rec[0] == RECORD_MERGED:
258 elif rec[0] == RECORD_MERGED:
253 # drop the onode data (not contained in v1)
259 # drop the onode data (not contained in v1)
254 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
260 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
255 for rec in v1records:
261 for rec in v1records:
256 if rec not in oldv2:
262 if rec not in oldv2:
257 return False
263 return False
258 else:
264 else:
259 return True
265 return True
260
266
261 def _readrecordsv1(self):
267 def _readrecordsv1(self):
262 """read on disk merge state for version 1 file
268 """read on disk merge state for version 1 file
263
269
264 returns list of record [(TYPE, data), ...]
270 returns list of record [(TYPE, data), ...]
265
271
266 Note: the "F" data from this file are one entry short
272 Note: the "F" data from this file are one entry short
267 (no "other file node" entry)
273 (no "other file node" entry)
268 """
274 """
269 records = []
275 records = []
270 try:
276 try:
271 f = self._repo.vfs(self.statepathv1)
277 f = self._repo.vfs(self.statepathv1)
272 for i, l in enumerate(f):
278 for i, l in enumerate(f):
273 if i == 0:
279 if i == 0:
274 records.append((RECORD_LOCAL, l[:-1]))
280 records.append((RECORD_LOCAL, l[:-1]))
275 else:
281 else:
276 records.append((RECORD_MERGED, l[:-1]))
282 records.append((RECORD_MERGED, l[:-1]))
277 f.close()
283 f.close()
278 except IOError as err:
284 except IOError as err:
279 if err.errno != errno.ENOENT:
285 if err.errno != errno.ENOENT:
280 raise
286 raise
281 return records
287 return records
282
288
283 def _readrecordsv2(self):
289 def _readrecordsv2(self):
284 """read on disk merge state for version 2 file
290 """read on disk merge state for version 2 file
285
291
286 This format is a list of arbitrary records of the form:
292 This format is a list of arbitrary records of the form:
287
293
288 [type][length][content]
294 [type][length][content]
289
295
290 `type` is a single character, `length` is a 4 byte integer, and
296 `type` is a single character, `length` is a 4 byte integer, and
291 `content` is an arbitrary byte sequence of length `length`.
297 `content` is an arbitrary byte sequence of length `length`.
292
298
293 Mercurial versions prior to 3.7 have a bug where if there are
299 Mercurial versions prior to 3.7 have a bug where if there are
294 unsupported mandatory merge records, attempting to clear out the merge
300 unsupported mandatory merge records, attempting to clear out the merge
295 state with hg update --clean or similar aborts. The 't' record type
301 state with hg update --clean or similar aborts. The 't' record type
296 works around that by writing out what those versions treat as an
302 works around that by writing out what those versions treat as an
297 advisory record, but later versions interpret as special: the first
303 advisory record, but later versions interpret as special: the first
298 character is the 'real' record type and everything onwards is the data.
304 character is the 'real' record type and everything onwards is the data.
299
305
300 Returns list of records [(TYPE, data), ...]."""
306 Returns list of records [(TYPE, data), ...]."""
301 records = []
307 records = []
302 try:
308 try:
303 f = self._repo.vfs(self.statepathv2)
309 f = self._repo.vfs(self.statepathv2)
304 data = f.read()
310 data = f.read()
305 off = 0
311 off = 0
306 end = len(data)
312 end = len(data)
307 while off < end:
313 while off < end:
308 rtype = data[off:off + 1]
314 rtype = data[off:off + 1]
309 off += 1
315 off += 1
310 length = _unpack('>I', data[off:(off + 4)])[0]
316 length = _unpack('>I', data[off:(off + 4)])[0]
311 off += 4
317 off += 4
312 record = data[off:(off + length)]
318 record = data[off:(off + length)]
313 off += length
319 off += length
314 if rtype == RECORD_OVERRIDE:
320 if rtype == RECORD_OVERRIDE:
315 rtype, record = record[0:1], record[1:]
321 rtype, record = record[0:1], record[1:]
316 records.append((rtype, record))
322 records.append((rtype, record))
317 f.close()
323 f.close()
318 except IOError as err:
324 except IOError as err:
319 if err.errno != errno.ENOENT:
325 if err.errno != errno.ENOENT:
320 raise
326 raise
321 return records
327 return records
322
328
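The wire format described in the docstring above can be exercised standalone. Below is a minimal sketch of a reader; the sample bytes are invented, and b't' plays the role of RECORD_OVERRIDE.

import struct

def parse_v2(data):
    records, off = [], 0
    while off < len(data):
        rtype = data[off:off + 1]
        off += 1
        (length,) = struct.unpack('>I', data[off:off + 4])
        off += 4
        record = data[off:off + length]
        off += length
        if rtype == b't':                       # RECORD_OVERRIDE wrapper
            rtype, record = record[0:1], record[1:]
        records.append((rtype, record))
    return records

sample = (b'L' + struct.pack('>I', 4) + b'beef'
          + b't' + struct.pack('>I', 6) + b'Xhello')
print(parse_v2(sample))       # [(b'L', b'beef'), (b'X', b'hello')]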
323 @util.propertycache
329 @util.propertycache
324 def mergedriver(self):
330 def mergedriver(self):
325 # protect against the following:
331 # protect against the following:
326 # - A configures a malicious merge driver in their hgrc, then
332 # - A configures a malicious merge driver in their hgrc, then
327 # pauses the merge
333 # pauses the merge
328 # - A edits their hgrc to remove references to the merge driver
334 # - A edits their hgrc to remove references to the merge driver
329 # - A gives a copy of their entire repo, including .hg, to B
335 # - A gives a copy of their entire repo, including .hg, to B
330 # - B inspects .hgrc and finds it to be clean
336 # - B inspects .hgrc and finds it to be clean
331 # - B then continues the merge and the malicious merge driver
337 # - B then continues the merge and the malicious merge driver
332 # gets invoked
338 # gets invoked
333 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
339 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
334 if (self._readmergedriver is not None
340 if (self._readmergedriver is not None
335 and self._readmergedriver != configmergedriver):
341 and self._readmergedriver != configmergedriver):
336 raise error.ConfigError(
342 raise error.ConfigError(
337 _("merge driver changed since merge started"),
343 _("merge driver changed since merge started"),
338 hint=_("revert merge driver change or abort merge"))
344 hint=_("revert merge driver change or abort merge"))
339
345
340 return configmergedriver
346 return configmergedriver
341
347
342 @util.propertycache
348 @util.propertycache
343 def localctx(self):
349 def localctx(self):
344 if self._local is None:
350 if self._local is None:
345 msg = "localctx accessed but self._local isn't set"
351 msg = "localctx accessed but self._local isn't set"
346 raise error.ProgrammingError(msg)
352 raise error.ProgrammingError(msg)
347 return self._repo[self._local]
353 return self._repo[self._local]
348
354
349 @util.propertycache
355 @util.propertycache
350 def otherctx(self):
356 def otherctx(self):
351 if self._other is None:
357 if self._other is None:
352 msg = "otherctx accessed but self._other isn't set"
358 msg = "otherctx accessed but self._other isn't set"
353 raise error.ProgrammingError(msg)
359 raise error.ProgrammingError(msg)
354 return self._repo[self._other]
360 return self._repo[self._other]
355
361
356 def active(self):
362 def active(self):
357 """Whether mergestate is active.
363 """Whether mergestate is active.
358
364
359 Returns True if there appears to be mergestate. This is a rough proxy
365 Returns True if there appears to be mergestate. This is a rough proxy
360 for "is a merge in progress."
366 for "is a merge in progress."
361 """
367 """
362 # Check local variables before looking at filesystem for performance
368 # Check local variables before looking at filesystem for performance
363 # reasons.
369 # reasons.
364 return bool(self._local) or bool(self._state) or \
370 return bool(self._local) or bool(self._state) or \
365 self._repo.vfs.exists(self.statepathv1) or \
371 self._repo.vfs.exists(self.statepathv1) or \
366 self._repo.vfs.exists(self.statepathv2)
372 self._repo.vfs.exists(self.statepathv2)
367
373
368 def commit(self):
374 def commit(self):
369 """Write current state on disk (if necessary)"""
375 """Write current state on disk (if necessary)"""
370 if self._dirty:
376 if self._dirty:
371 records = self._makerecords()
377 records = self._makerecords()
372 self._writerecords(records)
378 self._writerecords(records)
373 self._dirty = False
379 self._dirty = False
374
380
375 def _makerecords(self):
381 def _makerecords(self):
376 records = []
382 records = []
377 records.append((RECORD_LOCAL, hex(self._local)))
383 records.append((RECORD_LOCAL, hex(self._local)))
378 records.append((RECORD_OTHER, hex(self._other)))
384 records.append((RECORD_OTHER, hex(self._other)))
379 if self.mergedriver:
385 if self.mergedriver:
380 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
386 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
381 self.mergedriver, self._mdstate])))
387 self.mergedriver, self._mdstate])))
382 # Write out state items. In all cases, the value of the state map entry
388 # Write out state items. In all cases, the value of the state map entry
383 # is written as the contents of the record. The record type depends on
389 # is written as the contents of the record. The record type depends on
384 # the type of state that is stored, and capital-letter records are used
390 # the type of state that is stored, and capital-letter records are used
385 # to prevent older versions of Mercurial that do not support the feature
391 # to prevent older versions of Mercurial that do not support the feature
386 # from loading them.
392 # from loading them.
387 for filename, v in self._state.iteritems():
393 for filename, v in self._state.iteritems():
388 if v[0] == 'd':
394 if v[0] == 'd':
389 # Driver-resolved merge. These are stored in 'D' records.
395 # Driver-resolved merge. These are stored in 'D' records.
390 records.append((RECORD_MERGE_DRIVER_MERGE,
396 records.append((RECORD_MERGE_DRIVER_MERGE,
391 '\0'.join([filename] + v)))
397 '\0'.join([filename] + v)))
392 elif v[0] in ('pu', 'pr'):
398 elif v[0] in ('pu', 'pr'):
393 # Path conflicts. These are stored in 'P' records. The current
399 # Path conflicts. These are stored in 'P' records. The current
394 # resolution state ('pu' or 'pr') is stored within the record.
400 # resolution state ('pu' or 'pr') is stored within the record.
395 records.append((RECORD_PATH_CONFLICT,
401 records.append((RECORD_PATH_CONFLICT,
396 '\0'.join([filename] + v)))
402 '\0'.join([filename] + v)))
397 elif v[1] == nullhex or v[6] == nullhex:
403 elif v[1] == nullhex or v[6] == nullhex:
398 # Change/Delete or Delete/Change conflicts. These are stored in
404 # Change/Delete or Delete/Change conflicts. These are stored in
399 # 'C' records. v[1] is the local file, and is nullhex when the
405 # 'C' records. v[1] is the local file, and is nullhex when the
400 # file is deleted locally ('dc'). v[6] is the remote file, and
406 # file is deleted locally ('dc'). v[6] is the remote file, and
401 # is nullhex when the file is deleted remotely ('cd').
407 # is nullhex when the file is deleted remotely ('cd').
402 records.append((RECORD_CHANGEDELETE_CONFLICT,
408 records.append((RECORD_CHANGEDELETE_CONFLICT,
403 '\0'.join([filename] + v)))
409 '\0'.join([filename] + v)))
404 else:
410 else:
405 # Normal files. These are stored in 'F' records.
411 # Normal files. These are stored in 'F' records.
406 records.append((RECORD_MERGED,
412 records.append((RECORD_MERGED,
407 '\0'.join([filename] + v)))
413 '\0'.join([filename] + v)))
408 for filename, extras in sorted(self._stateextras.iteritems()):
414 for filename, extras in sorted(self._stateextras.iteritems()):
409 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
415 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
410 extras.iteritems())
416 extras.iteritems())
411 records.append((RECORD_FILE_VALUES,
417 records.append((RECORD_FILE_VALUES,
412 '%s\0%s' % (filename, rawextras)))
418 '%s\0%s' % (filename, rawextras)))
413 if self._labels is not None:
419 if self._labels is not None:
414 labels = '\0'.join(self._labels)
420 labels = '\0'.join(self._labels)
415 records.append((RECORD_LABELS, labels))
421 records.append((RECORD_LABELS, labels))
416 return records
422 return records
417
423
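A small standalone sketch of how per-file extras are flattened into a single 'f' (RECORD_FILE_VALUES) payload and recovered again, mirroring `_makerecords` and `_read` above; the filename and extras are invented.

filename, extras = b'foo.txt', {b'ancestorlinknode': b'abc123'}
rawextras = b'\0'.join(b'%s\0%s' % (k, v) for k, v in extras.items())
record = b'%s\0%s' % (filename, rawextras)

# decoding, as _read() does:
fname, raw = record.split(b'\0', 1)
parts = raw.split(b'\0')
decoded = dict(zip(parts[::2], parts[1::2]))
assert decoded == extras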
418 def _writerecords(self, records):
424 def _writerecords(self, records):
419 """Write current state on disk (both v1 and v2)"""
425 """Write current state on disk (both v1 and v2)"""
420 self._writerecordsv1(records)
426 self._writerecordsv1(records)
421 self._writerecordsv2(records)
427 self._writerecordsv2(records)
422
428
423 def _writerecordsv1(self, records):
429 def _writerecordsv1(self, records):
424 """Write current state on disk in a version 1 file"""
430 """Write current state on disk in a version 1 file"""
425 f = self._repo.vfs(self.statepathv1, 'wb')
431 f = self._repo.vfs(self.statepathv1, 'wb')
426 irecords = iter(records)
432 irecords = iter(records)
427 lrecords = next(irecords)
433 lrecords = next(irecords)
428 assert lrecords[0] == RECORD_LOCAL
434 assert lrecords[0] == RECORD_LOCAL
429 f.write(hex(self._local) + '\n')
435 f.write(hex(self._local) + '\n')
430 for rtype, data in irecords:
436 for rtype, data in irecords:
431 if rtype == RECORD_MERGED:
437 if rtype == RECORD_MERGED:
432 f.write('%s\n' % _droponode(data))
438 f.write('%s\n' % _droponode(data))
433 f.close()
439 f.close()
434
440
435 def _writerecordsv2(self, records):
441 def _writerecordsv2(self, records):
436 """Write current state on disk in a version 2 file
442 """Write current state on disk in a version 2 file
437
443
438 See the docstring for _readrecordsv2 for why we use 't'."""
444 See the docstring for _readrecordsv2 for why we use 't'."""
439 # these are the records that all version 2 clients can read
445 # these are the records that all version 2 clients can read
440 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
446 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
441 f = self._repo.vfs(self.statepathv2, 'wb')
447 f = self._repo.vfs(self.statepathv2, 'wb')
442 for key, data in records:
448 for key, data in records:
443 assert len(key) == 1
449 assert len(key) == 1
444 if key not in allowlist:
450 if key not in allowlist:
445 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
451 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
446 format = '>sI%is' % len(data)
452 format = '>sI%is' % len(data)
447 f.write(_pack(format, key, len(data), data))
453 f.write(_pack(format, key, len(data), data))
448 f.close()
454 f.close()
449
455
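A companion sketch of the writer side: records outside the allowlist are wrapped in a 't' (RECORD_OVERRIDE) record so that pre-3.7 clients treat them as advisory. The `pack_v2` helper is invented for illustration.

import struct

def pack_v2(key, data, allowlist=(b'L', b'O', b'F')):
    if key not in allowlist:
        key, data = b't', key + data
    return struct.pack('>sI%ds' % len(data), key, len(data), data)

blob = pack_v2(b'l', b'working copy\x00merge rev')   # advisory labels record
# Feeding `blob` to the reader sketch after _readrecordsv2 recovers
# (b'l', b'working copy\x00merge rev').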
450 def add(self, fcl, fco, fca, fd):
456 def add(self, fcl, fco, fca, fd):
451 """add a new (potentially?) conflicting file the merge state
457 """add a new (potentially?) conflicting file the merge state
452 fcl: file context for local,
458 fcl: file context for local,
453 fco: file context for remote,
459 fco: file context for remote,
454 fca: file context for ancestors,
460 fca: file context for ancestors,
455 fd: file path of the resulting merge.
461 fd: file path of the resulting merge.
456
462
457 note: also write the local version to the `.hg/merge` directory.
463 note: also write the local version to the `.hg/merge` directory.
458 """
464 """
459 if fcl.isabsent():
465 if fcl.isabsent():
460 hash = nullhex
466 hash = nullhex
461 else:
467 else:
462 hash = hex(hashlib.sha1(fcl.path()).digest())
468 hash = hex(hashlib.sha1(fcl.path()).digest())
463 self._repo.vfs.write('merge/' + hash, fcl.data())
469 self._repo.vfs.write('merge/' + hash, fcl.data())
464 self._state[fd] = ['u', hash, fcl.path(),
470 self._state[fd] = ['u', hash, fcl.path(),
465 fca.path(), hex(fca.filenode()),
471 fca.path(), hex(fca.filenode()),
466 fco.path(), hex(fco.filenode()),
472 fco.path(), hex(fco.filenode()),
467 fcl.flags()]
473 fcl.flags()]
468 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
474 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
469 self._dirty = True
475 self._dirty = True
470
476
471 def addpath(self, path, frename, forigin):
477 def addpath(self, path, frename, forigin):
472 """add a new conflicting path to the merge state
478 """add a new conflicting path to the merge state
473 path: the path that conflicts
479 path: the path that conflicts
474 frename: the filename the conflicting file was renamed to
480 frename: the filename the conflicting file was renamed to
475 forigin: origin of the file ('l' or 'r' for local/remote)
481 forigin: origin of the file ('l' or 'r' for local/remote)
476 """
482 """
477 self._state[path] = ['pu', frename, forigin]
483 self._state[path] = ['pu', frename, forigin]
478 self._dirty = True
484 self._dirty = True
479
485
480 def __contains__(self, dfile):
486 def __contains__(self, dfile):
481 return dfile in self._state
487 return dfile in self._state
482
488
483 def __getitem__(self, dfile):
489 def __getitem__(self, dfile):
484 return self._state[dfile][0]
490 return self._state[dfile][0]
485
491
486 def __iter__(self):
492 def __iter__(self):
487 return iter(sorted(self._state))
493 return iter(sorted(self._state))
488
494
489 def files(self):
495 def files(self):
490 return self._state.keys()
496 return self._state.keys()
491
497
492 def mark(self, dfile, state):
498 def mark(self, dfile, state):
493 self._state[dfile][0] = state
499 self._state[dfile][0] = state
494 self._dirty = True
500 self._dirty = True
495
501
496 def mdstate(self):
502 def mdstate(self):
497 return self._mdstate
503 return self._mdstate
498
504
499 def unresolved(self):
505 def unresolved(self):
500 """Obtain the paths of unresolved files."""
506 """Obtain the paths of unresolved files."""
501
507
502 for f, entry in self._state.iteritems():
508 for f, entry in self._state.iteritems():
503 if entry[0] in ('u', 'pu'):
509 if entry[0] in ('u', 'pu'):
504 yield f
510 yield f
505
511
506 def driverresolved(self):
512 def driverresolved(self):
507 """Obtain the paths of driver-resolved files."""
513 """Obtain the paths of driver-resolved files."""
508
514
509 for f, entry in self._state.items():
515 for f, entry in self._state.items():
510 if entry[0] == 'd':
516 if entry[0] == 'd':
511 yield f
517 yield f
512
518
513 def extras(self, filename):
519 def extras(self, filename):
514 return self._stateextras.setdefault(filename, {})
520 return self._stateextras.setdefault(filename, {})
515
521
516 def _resolve(self, preresolve, dfile, wctx):
522 def _resolve(self, preresolve, dfile, wctx):
517 """rerun merge process for file path `dfile`"""
523 """rerun merge process for file path `dfile`"""
518 if self[dfile] in 'rd':
524 if self[dfile] in 'rd':
519 return True, 0
525 return True, 0
520 stateentry = self._state[dfile]
526 stateentry = self._state[dfile]
521 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
527 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
522 octx = self._repo[self._other]
528 octx = self._repo[self._other]
523 extras = self.extras(dfile)
529 extras = self.extras(dfile)
524 anccommitnode = extras.get('ancestorlinknode')
530 anccommitnode = extras.get('ancestorlinknode')
525 if anccommitnode:
531 if anccommitnode:
526 actx = self._repo[anccommitnode]
532 actx = self._repo[anccommitnode]
527 else:
533 else:
528 actx = None
534 actx = None
529 fcd = self._filectxorabsent(hash, wctx, dfile)
535 fcd = self._filectxorabsent(hash, wctx, dfile)
530 fco = self._filectxorabsent(onode, octx, ofile)
536 fco = self._filectxorabsent(onode, octx, ofile)
531 # TODO: move this to filectxorabsent
537 # TODO: move this to filectxorabsent
532 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
538 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
533 # "premerge" x flags
539 # "premerge" x flags
534 flo = fco.flags()
540 flo = fco.flags()
535 fla = fca.flags()
541 fla = fca.flags()
536 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
542 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
537 if fca.node() == nullid and flags != flo:
543 if fca.node() == nullid and flags != flo:
538 if preresolve:
544 if preresolve:
539 self._repo.ui.warn(
545 self._repo.ui.warn(
540 _('warning: cannot merge flags for %s '
546 _('warning: cannot merge flags for %s '
541 'without common ancestor - keeping local flags\n')
547 'without common ancestor - keeping local flags\n')
542 % afile)
548 % afile)
543 elif flags == fla:
549 elif flags == fla:
544 flags = flo
550 flags = flo
545 if preresolve:
551 if preresolve:
546 # restore local
552 # restore local
547 if hash != nullhex:
553 if hash != nullhex:
548 f = self._repo.vfs('merge/' + hash)
554 f = self._repo.vfs('merge/' + hash)
549 wctx[dfile].write(f.read(), flags)
555 wctx[dfile].write(f.read(), flags)
550 f.close()
556 f.close()
551 else:
557 else:
552 wctx[dfile].remove(ignoremissing=True)
558 wctx[dfile].remove(ignoremissing=True)
553 complete, r, deleted = filemerge.premerge(self._repo, wctx,
559 complete, r, deleted = filemerge.premerge(self._repo, wctx,
554 self._local, lfile, fcd,
560 self._local, lfile, fcd,
555 fco, fca,
561 fco, fca,
556 labels=self._labels)
562 labels=self._labels)
557 else:
563 else:
558 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
564 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
559 self._local, lfile, fcd,
565 self._local, lfile, fcd,
560 fco, fca,
566 fco, fca,
561 labels=self._labels)
567 labels=self._labels)
562 if r is None:
568 if r is None:
563 # no real conflict
569 # no real conflict
564 del self._state[dfile]
570 del self._state[dfile]
565 self._stateextras.pop(dfile, None)
571 self._stateextras.pop(dfile, None)
566 self._dirty = True
572 self._dirty = True
567 elif not r:
573 elif not r:
568 self.mark(dfile, 'r')
574 self.mark(dfile, 'r')
569
575
570 if complete:
576 if complete:
571 action = None
577 action = None
572 if deleted:
578 if deleted:
573 if fcd.isabsent():
579 if fcd.isabsent():
574 # dc: local picked. Need to drop if present, which may
580 # dc: local picked. Need to drop if present, which may
575 # happen on re-resolves.
581 # happen on re-resolves.
576 action = 'f'
582 action = 'f'
577 else:
583 else:
578 # cd: remote picked (or otherwise deleted)
584 # cd: remote picked (or otherwise deleted)
579 action = 'r'
585 action = 'r'
580 else:
586 else:
581 if fcd.isabsent(): # dc: remote picked
587 if fcd.isabsent(): # dc: remote picked
582 action = 'g'
588 action = 'g'
583 elif fco.isabsent(): # cd: local picked
589 elif fco.isabsent(): # cd: local picked
584 if dfile in self.localctx:
590 if dfile in self.localctx:
585 action = 'am'
591 action = 'am'
586 else:
592 else:
587 action = 'a'
593 action = 'a'
588 # else: regular merges (no action necessary)
594 # else: regular merges (no action necessary)
589 self._results[dfile] = r, action
595 self._results[dfile] = r, action
590
596
591 return complete, r
597 return complete, r
592
598
593 def _filectxorabsent(self, hexnode, ctx, f):
599 def _filectxorabsent(self, hexnode, ctx, f):
594 if hexnode == nullhex:
600 if hexnode == nullhex:
595 return filemerge.absentfilectx(ctx, f)
601 return filemerge.absentfilectx(ctx, f)
596 else:
602 else:
597 return ctx[f]
603 return ctx[f]
598
604
599 def preresolve(self, dfile, wctx):
605 def preresolve(self, dfile, wctx):
600 """run premerge process for dfile
606 """run premerge process for dfile
601
607
602 Returns whether the merge is complete, and the exit code."""
608 Returns whether the merge is complete, and the exit code."""
603 return self._resolve(True, dfile, wctx)
609 return self._resolve(True, dfile, wctx)
604
610
605 def resolve(self, dfile, wctx):
611 def resolve(self, dfile, wctx):
606 """run merge process (assuming premerge was run) for dfile
612 """run merge process (assuming premerge was run) for dfile
607
613
608 Returns the exit code of the merge."""
614 Returns the exit code of the merge."""
609 return self._resolve(False, dfile, wctx)[1]
615 return self._resolve(False, dfile, wctx)[1]
610
616
611 def counts(self):
617 def counts(self):
612 """return counts for updated, merged and removed files in this
618 """return counts for updated, merged and removed files in this
613 session"""
619 session"""
614 updated, merged, removed = 0, 0, 0
620 updated, merged, removed = 0, 0, 0
615 for r, action in self._results.itervalues():
621 for r, action in self._results.itervalues():
616 if r is None:
622 if r is None:
617 updated += 1
623 updated += 1
618 elif r == 0:
624 elif r == 0:
619 if action == 'r':
625 if action == 'r':
620 removed += 1
626 removed += 1
621 else:
627 else:
622 merged += 1
628 merged += 1
623 return updated, merged, removed
629 return updated, merged, removed
624
630
625 def unresolvedcount(self):
631 def unresolvedcount(self):
626 """get unresolved count for this merge (persistent)"""
632 """get unresolved count for this merge (persistent)"""
627 return len(list(self.unresolved()))
633 return len(list(self.unresolved()))
628
634
629 def actions(self):
635 def actions(self):
630 """return lists of actions to perform on the dirstate"""
636 """return lists of actions to perform on the dirstate"""
631 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
637 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
632 for f, (r, action) in self._results.iteritems():
638 for f, (r, action) in self._results.iteritems():
633 if action is not None:
639 if action is not None:
634 actions[action].append((f, None, "merge result"))
640 actions[action].append((f, None, "merge result"))
635 return actions
641 return actions
636
642
637 def recordactions(self):
643 def recordactions(self):
638 """record remove/add/get actions in the dirstate"""
644 """record remove/add/get actions in the dirstate"""
639 branchmerge = self._repo.dirstate.p2() != nullid
645 branchmerge = self._repo.dirstate.p2() != nullid
640 recordupdates(self._repo, self.actions(), branchmerge)
646 recordupdates(self._repo, self.actions(), branchmerge)
641
647
642 def queueremove(self, f):
648 def queueremove(self, f):
643 """queues a file to be removed from the dirstate
649 """queues a file to be removed from the dirstate
644
650
645 Meant for use by custom merge drivers."""
651 Meant for use by custom merge drivers."""
646 self._results[f] = 0, 'r'
652 self._results[f] = 0, 'r'
647
653
648 def queueadd(self, f):
654 def queueadd(self, f):
649 """queues a file to be added to the dirstate
655 """queues a file to be added to the dirstate
650
656
651 Meant for use by custom merge drivers."""
657 Meant for use by custom merge drivers."""
652 self._results[f] = 0, 'a'
658 self._results[f] = 0, 'a'
653
659
654 def queueget(self, f):
660 def queueget(self, f):
655 """queues a file to be marked modified in the dirstate
661 """queues a file to be marked modified in the dirstate
656
662
657 Meant for use by custom merge drivers."""
663 Meant for use by custom merge drivers."""
658 self._results[f] = 0, 'g'
664 self._results[f] = 0, 'g'
659
665
660 def _getcheckunknownconfig(repo, section, name):
666 def _getcheckunknownconfig(repo, section, name):
661 config = repo.ui.config(section, name)
667 config = repo.ui.config(section, name)
662 valid = ['abort', 'ignore', 'warn']
668 valid = ['abort', 'ignore', 'warn']
663 if config not in valid:
669 if config not in valid:
664 validstr = ', '.join(["'" + v + "'" for v in valid])
670 validstr = ', '.join(["'" + v + "'" for v in valid])
665 raise error.ConfigError(_("%s.%s not valid "
671 raise error.ConfigError(_("%s.%s not valid "
666 "('%s' is none of %s)")
672 "('%s' is none of %s)")
667 % (section, name, config, validstr))
673 % (section, name, config, validstr))
668 return config
674 return config
669
675
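A standalone sketch of the validation above, with a plain dict standing in for `repo.ui`; the function name, the dict lookup, and the sample values are invented.

def getcheckunknownconfig(uiconfig, section, name):
    config = uiconfig.get('%s.%s' % (section, name), 'abort')
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join("'" + v + "'" for v in valid)
        raise ValueError("%s.%s not valid ('%s' is none of %s)"
                         % (section, name, config, validstr))
    return config

print(getcheckunknownconfig({'merge.checkunknown': 'warn'},
                            'merge', 'checkunknown'))    # warn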
670 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
676 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
671 if wctx.isinmemory():
677 if wctx.isinmemory():
672 # Nothing to do in IMM because nothing in the "working copy" can be an
678 # Nothing to do in IMM because nothing in the "working copy" can be an
673 # unknown file.
679 # unknown file.
674 #
680 #
675 # Note that we should bail out here, not in ``_checkunknownfiles()``,
681 # Note that we should bail out here, not in ``_checkunknownfiles()``,
676 # because that function does other useful work.
682 # because that function does other useful work.
677 return False
683 return False
678
684
679 if f2 is None:
685 if f2 is None:
680 f2 = f
686 f2 = f
681 return (repo.wvfs.audit.check(f)
687 return (repo.wvfs.audit.check(f)
682 and repo.wvfs.isfileorlink(f)
688 and repo.wvfs.isfileorlink(f)
683 and repo.dirstate.normalize(f) not in repo.dirstate
689 and repo.dirstate.normalize(f) not in repo.dirstate
684 and mctx[f2].cmp(wctx[f]))
690 and mctx[f2].cmp(wctx[f]))
685
691
686 class _unknowndirschecker(object):
692 class _unknowndirschecker(object):
687 """
693 """
688 Look for any unknown files or directories that may have a path conflict
694 Look for any unknown files or directories that may have a path conflict
689 with a file. If any path prefix of the file exists as a file or link,
695 with a file. If any path prefix of the file exists as a file or link,
690 then it conflicts. If the file itself is a directory that contains any
696 then it conflicts. If the file itself is a directory that contains any
691 file that is not tracked, then it conflicts.
697 file that is not tracked, then it conflicts.
692
698
693 Returns the shortest path at which a conflict occurs, or None if there is
699 Returns the shortest path at which a conflict occurs, or None if there is
694 no conflict.
700 no conflict.
695 """
701 """
696 def __init__(self):
702 def __init__(self):
697 # A set of paths known to be good. This prevents repeated checking of
703 # A set of paths known to be good. This prevents repeated checking of
698 # dirs. It will be updated with any new dirs that are checked and found
704 # dirs. It will be updated with any new dirs that are checked and found
699 # to be safe.
705 # to be safe.
700 self._unknowndircache = set()
706 self._unknowndircache = set()
701
707
702 # A set of paths that are known to be absent. This prevents repeated
708 # A set of paths that are known to be absent. This prevents repeated
703 # checking of subdirectories that are known not to exist. It will be
709 # checking of subdirectories that are known not to exist. It will be
704 # updated with any new dirs that are checked and found to be absent.
710 # updated with any new dirs that are checked and found to be absent.
705 self._missingdircache = set()
711 self._missingdircache = set()
706
712
707 def __call__(self, repo, wctx, f):
713 def __call__(self, repo, wctx, f):
708 if wctx.isinmemory():
714 if wctx.isinmemory():
709 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
715 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
710 return False
716 return False
711
717
712 # Check for path prefixes that exist as unknown files.
718 # Check for path prefixes that exist as unknown files.
713 for p in reversed(list(util.finddirs(f))):
719 for p in reversed(list(util.finddirs(f))):
714 if p in self._missingdircache:
720 if p in self._missingdircache:
715 return
721 return
716 if p in self._unknowndircache:
722 if p in self._unknowndircache:
717 continue
723 continue
718 if repo.wvfs.audit.check(p):
724 if repo.wvfs.audit.check(p):
719 if (repo.wvfs.isfileorlink(p)
725 if (repo.wvfs.isfileorlink(p)
720 and repo.dirstate.normalize(p) not in repo.dirstate):
726 and repo.dirstate.normalize(p) not in repo.dirstate):
721 return p
727 return p
722 if not repo.wvfs.lexists(p):
728 if not repo.wvfs.lexists(p):
723 self._missingdircache.add(p)
729 self._missingdircache.add(p)
724 return
730 return
725 self._unknowndircache.add(p)
731 self._unknowndircache.add(p)
726
732
727 # Check if the file conflicts with a directory containing unknown files.
733 # Check if the file conflicts with a directory containing unknown files.
728 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
734 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
729 # Does the directory contain any files that are not in the dirstate?
735 # Does the directory contain any files that are not in the dirstate?
730 for p, dirs, files in repo.wvfs.walk(f):
736 for p, dirs, files in repo.wvfs.walk(f):
731 for fn in files:
737 for fn in files:
732 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
738 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
733 relf = repo.dirstate.normalize(relf, isknown=True)
739 relf = repo.dirstate.normalize(relf, isknown=True)
734 if relf not in repo.dirstate:
740 if relf not in repo.dirstate:
735 return f
741 return f
736 return None
742 return None
737
743
738 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
744 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
739 """
745 """
740 Considers any actions that care about the presence of conflicting unknown
746 Considers any actions that care about the presence of conflicting unknown
741 files. For some actions, the result is to abort; for others, it is to
747 files. For some actions, the result is to abort; for others, it is to
742 choose a different action.
748 choose a different action.
743 """
749 """
744 fileconflicts = set()
750 fileconflicts = set()
745 pathconflicts = set()
751 pathconflicts = set()
746 warnconflicts = set()
752 warnconflicts = set()
747 abortconflicts = set()
753 abortconflicts = set()
748 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
754 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
749 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
755 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
750 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
756 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
751 if not force:
757 if not force:
752 def collectconflicts(conflicts, config):
758 def collectconflicts(conflicts, config):
753 if config == 'abort':
759 if config == 'abort':
754 abortconflicts.update(conflicts)
760 abortconflicts.update(conflicts)
755 elif config == 'warn':
761 elif config == 'warn':
756 warnconflicts.update(conflicts)
762 warnconflicts.update(conflicts)
757
763
758 checkunknowndirs = _unknowndirschecker()
764 checkunknowndirs = _unknowndirschecker()
759 for f, (m, args, msg) in actions.iteritems():
765 for f, (m, args, msg) in actions.iteritems():
760 if m in ('c', 'dc'):
766 if m in ('c', 'dc'):
761 if _checkunknownfile(repo, wctx, mctx, f):
767 if _checkunknownfile(repo, wctx, mctx, f):
762 fileconflicts.add(f)
768 fileconflicts.add(f)
763 elif pathconfig and f not in wctx:
769 elif pathconfig and f not in wctx:
764 path = checkunknowndirs(repo, wctx, f)
770 path = checkunknowndirs(repo, wctx, f)
765 if path is not None:
771 if path is not None:
766 pathconflicts.add(path)
772 pathconflicts.add(path)
767 elif m == 'dg':
773 elif m == 'dg':
768 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
774 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
769 fileconflicts.add(f)
775 fileconflicts.add(f)
770
776
771 allconflicts = fileconflicts | pathconflicts
777 allconflicts = fileconflicts | pathconflicts
772 ignoredconflicts = set([c for c in allconflicts
778 ignoredconflicts = set([c for c in allconflicts
773 if repo.dirstate._ignore(c)])
779 if repo.dirstate._ignore(c)])
774 unknownconflicts = allconflicts - ignoredconflicts
780 unknownconflicts = allconflicts - ignoredconflicts
775 collectconflicts(ignoredconflicts, ignoredconfig)
781 collectconflicts(ignoredconflicts, ignoredconfig)
776 collectconflicts(unknownconflicts, unknownconfig)
782 collectconflicts(unknownconflicts, unknownconfig)
777 else:
783 else:
778 for f, (m, args, msg) in actions.iteritems():
784 for f, (m, args, msg) in actions.iteritems():
779 if m == 'cm':
785 if m == 'cm':
780 fl2, anc = args
786 fl2, anc = args
781 different = _checkunknownfile(repo, wctx, mctx, f)
787 different = _checkunknownfile(repo, wctx, mctx, f)
782 if repo.dirstate._ignore(f):
788 if repo.dirstate._ignore(f):
783 config = ignoredconfig
789 config = ignoredconfig
784 else:
790 else:
785 config = unknownconfig
791 config = unknownconfig
786
792
787 # The behavior when force is True is described by this table:
793 # The behavior when force is True is described by this table:
788 # config different mergeforce | action backup
794 # config different mergeforce | action backup
789 # * n * | get n
795 # * n * | get n
790 # * y y | merge -
796 # * y y | merge -
791 # abort y n | merge - (1)
797 # abort y n | merge - (1)
792 # warn y n | warn + get y
798 # warn y n | warn + get y
793 # ignore y n | get y
799 # ignore y n | get y
794 #
800 #
795 # (1) this is probably the wrong behavior here -- we should
801 # (1) this is probably the wrong behavior here -- we should
796 # probably abort, but some actions like rebases currently
802 # probably abort, but some actions like rebases currently
797 # don't like an abort happening in the middle of
803 # don't like an abort happening in the middle of
798 # merge.update.
804 # merge.update.
799 if not different:
805 if not different:
800 actions[f] = ('g', (fl2, False), "remote created")
806 actions[f] = ('g', (fl2, False), "remote created")
801 elif mergeforce or config == 'abort':
807 elif mergeforce or config == 'abort':
802 actions[f] = ('m', (f, f, None, False, anc),
808 actions[f] = ('m', (f, f, None, False, anc),
803 "remote differs from untracked local")
809 "remote differs from untracked local")
804 elif config == 'abort':
810 elif config == 'abort':
805 abortconflicts.add(f)
811 abortconflicts.add(f)
806 else:
812 else:
807 if config == 'warn':
813 if config == 'warn':
808 warnconflicts.add(f)
814 warnconflicts.add(f)
809 actions[f] = ('g', (fl2, True), "remote created")
815 actions[f] = ('g', (fl2, True), "remote created")
810
816
811 for f in sorted(abortconflicts):
817 for f in sorted(abortconflicts):
812 warn = repo.ui.warn
818 warn = repo.ui.warn
813 if f in pathconflicts:
819 if f in pathconflicts:
814 if repo.wvfs.isfileorlink(f):
820 if repo.wvfs.isfileorlink(f):
815 warn(_("%s: untracked file conflicts with directory\n") % f)
821 warn(_("%s: untracked file conflicts with directory\n") % f)
816 else:
822 else:
817 warn(_("%s: untracked directory conflicts with file\n") % f)
823 warn(_("%s: untracked directory conflicts with file\n") % f)
818 else:
824 else:
819 warn(_("%s: untracked file differs\n") % f)
825 warn(_("%s: untracked file differs\n") % f)
820 if abortconflicts:
826 if abortconflicts:
821 raise error.Abort(_("untracked files in working directory "
827 raise error.Abort(_("untracked files in working directory "
822 "differ from files in requested revision"))
828 "differ from files in requested revision"))
823
829
824 for f in sorted(warnconflicts):
830 for f in sorted(warnconflicts):
825 if repo.wvfs.isfileorlink(f):
831 if repo.wvfs.isfileorlink(f):
826 repo.ui.warn(_("%s: replacing untracked file\n") % f)
832 repo.ui.warn(_("%s: replacing untracked file\n") % f)
827 else:
833 else:
828 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
834 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
829
835
830 for f, (m, args, msg) in actions.iteritems():
836 for f, (m, args, msg) in actions.iteritems():
831 if m == 'c':
837 if m == 'c':
832 backup = (f in fileconflicts or f in pathconflicts or
838 backup = (f in fileconflicts or f in pathconflicts or
833 any(p in pathconflicts for p in util.finddirs(f)))
839 any(p in pathconflicts for p in util.finddirs(f)))
834 flags, = args
840 flags, = args
835 actions[f] = ('g', (flags, backup), msg)
841 actions[f] = ('g', (flags, backup), msg)
836
842
837 def _forgetremoved(wctx, mctx, branchmerge):
843 def _forgetremoved(wctx, mctx, branchmerge):
838 """
844 """
839 Forget removed files
845 Forget removed files
840
846
841 If we're jumping between revisions (as opposed to merging), and if
847 If we're jumping between revisions (as opposed to merging), and if
842 neither the working directory nor the target rev has the file,
848 neither the working directory nor the target rev has the file,
843 then we need to remove it from the dirstate, to prevent the
849 then we need to remove it from the dirstate, to prevent the
844 dirstate from listing the file when it is no longer in the
850 dirstate from listing the file when it is no longer in the
845 manifest.
851 manifest.
846
852
847 If we're merging, and the other revision has removed a file
853 If we're merging, and the other revision has removed a file
848 that is not present in the working directory, we need to mark it
854 that is not present in the working directory, we need to mark it
849 as removed.
855 as removed.
850 """
856 """
851
857
852 actions = {}
858 actions = {}
853 m = 'f'
859 m = 'f'
854 if branchmerge:
860 if branchmerge:
855 m = 'r'
861 m = 'r'
856 for f in wctx.deleted():
862 for f in wctx.deleted():
857 if f not in mctx:
863 if f not in mctx:
858 actions[f] = m, None, "forget deleted"
864 actions[f] = m, None, "forget deleted"
859
865
860 if not branchmerge:
866 if not branchmerge:
861 for f in wctx.removed():
867 for f in wctx.removed():
862 if f not in mctx:
868 if f not in mctx:
863 actions[f] = 'f', None, "forget removed"
869 actions[f] = 'f', None, "forget removed"
864
870
865 return actions
871 return actions
866
872
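# Illustrative sketch only: a toy restatement of the rule implemented by
# _forgetremoved() above, using plain sets instead of Mercurial contexts.
# ``deleted`` and ``removed`` stand in for wctx.deleted()/wctx.removed(),
# ``target`` for the set of files present in mctx.

def _toy_forgetremoved(deleted, removed, target, branchmerge):
    actions = {}
    code = 'r' if branchmerge else 'f'
    for f in deleted:
        if f not in target:
            actions[f] = (code, None, "forget deleted")
    if not branchmerge:
        for f in removed:
            if f not in target:
                actions[f] = ('f', None, "forget removed")
    return actions

# _toy_forgetremoved({'gone.txt'}, set(), {'kept.txt'}, branchmerge=False)
#   -> {'gone.txt': ('f', None, 'forget deleted')}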
867 def _checkcollision(repo, wmf, actions):
873 def _checkcollision(repo, wmf, actions):
868 # build provisional merged manifest up
874 # build provisional merged manifest up
869 pmmf = set(wmf)
875 pmmf = set(wmf)
870
876
871 if actions:
877 if actions:
872 # k, dr, e and rd are no-op
878 # k, dr, e and rd are no-op
873 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
879 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
874 for f, args, msg in actions[m]:
880 for f, args, msg in actions[m]:
875 pmmf.add(f)
881 pmmf.add(f)
876 for f, args, msg in actions['r']:
882 for f, args, msg in actions['r']:
877 pmmf.discard(f)
883 pmmf.discard(f)
878 for f, args, msg in actions['dm']:
884 for f, args, msg in actions['dm']:
879 f2, flags = args
885 f2, flags = args
880 pmmf.discard(f2)
886 pmmf.discard(f2)
881 pmmf.add(f)
887 pmmf.add(f)
882 for f, args, msg in actions['dg']:
888 for f, args, msg in actions['dg']:
883 pmmf.add(f)
889 pmmf.add(f)
884 for f, args, msg in actions['m']:
890 for f, args, msg in actions['m']:
885 f1, f2, fa, move, anc = args
891 f1, f2, fa, move, anc = args
886 if move:
892 if move:
887 pmmf.discard(f1)
893 pmmf.discard(f1)
888 pmmf.add(f)
894 pmmf.add(f)
889
895
890 # check case-folding collision in provisional merged manifest
896 # check case-folding collision in provisional merged manifest
891 foldmap = {}
897 foldmap = {}
892 for f in pmmf:
898 for f in pmmf:
893 fold = util.normcase(f)
899 fold = util.normcase(f)
894 if fold in foldmap:
900 if fold in foldmap:
895 raise error.Abort(_("case-folding collision between %s and %s")
901 raise error.Abort(_("case-folding collision between %s and %s")
896 % (f, foldmap[fold]))
902 % (f, foldmap[fold]))
897 foldmap[fold] = f
903 foldmap[fold] = f
898
904
899 # check case-folding of directories
905 # check case-folding of directories
900 foldprefix = unfoldprefix = lastfull = ''
906 foldprefix = unfoldprefix = lastfull = ''
901 for fold, f in sorted(foldmap.items()):
907 for fold, f in sorted(foldmap.items()):
902 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
908 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
903 # the folded prefix matches but actual casing is different
909 # the folded prefix matches but actual casing is different
904 raise error.Abort(_("case-folding collision between "
910 raise error.Abort(_("case-folding collision between "
905 "%s and directory of %s") % (lastfull, f))
911 "%s and directory of %s") % (lastfull, f))
906 foldprefix = fold + '/'
912 foldprefix = fold + '/'
907 unfoldprefix = f + '/'
913 unfoldprefix = f + '/'
908 lastfull = f
914 lastfull = f
909
915
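# Illustrative sketch only: the file-level half of the case-folding check in
# _checkcollision() above, with ``str.lower()`` standing in for util.normcase
# and a returned pair instead of error.Abort.  The directory-prefix check is
# omitted.

def _toy_findcasecollision(paths):
    """Return a colliding (existing, new) pair, or None if there is none."""
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()
        if fold in foldmap:
            return (foldmap[fold], f)
        foldmap[fold] = f
    return None

# _toy_findcasecollision(['README', 'readme', 'src/a.py'])
#   -> ('README', 'readme')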
910 def driverpreprocess(repo, ms, wctx, labels=None):
916 def driverpreprocess(repo, ms, wctx, labels=None):
911 """run the preprocess step of the merge driver, if any
917 """run the preprocess step of the merge driver, if any
912
918
913 This is currently not implemented -- it's an extension point."""
919 This is currently not implemented -- it's an extension point."""
914 return True
920 return True
915
921
916 def driverconclude(repo, ms, wctx, labels=None):
922 def driverconclude(repo, ms, wctx, labels=None):
917 """run the conclude step of the merge driver, if any
923 """run the conclude step of the merge driver, if any
918
924
919 This is currently not implemented -- it's an extension point."""
925 This is currently not implemented -- it's an extension point."""
920 return True
926 return True
921
927
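# Both driver hooks above are deliberately inert extension points.  A hedged
# sketch of how an external extension might take them over; the module layout
# and note text are invented for illustration, only extensions.wrapfunction
# and the hook signature come from Mercurial itself:
#
#     from mercurial import extensions, merge as mergemod
#
#     def _preprocess(orig, repo, ms, wctx, labels=None):
#         repo.ui.note('custom merge driver preprocess\n')
#         return orig(repo, ms, wctx, labels=labels)
#
#     def extsetup(ui):
#         extensions.wrapfunction(mergemod, 'driverpreprocess', _preprocess)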
922 def _filesindirs(repo, manifest, dirs):
928 def _filesindirs(repo, manifest, dirs):
923 """
929 """
924 Generator that yields pairs of all the files in the manifest that are found
930 Generator that yields pairs of all the files in the manifest that are found
925 inside the directories listed in dirs, and which directory they are found
931 inside the directories listed in dirs, and which directory they are found
926 in.
932 in.
927 """
933 """
928 for f in manifest:
934 for f in manifest:
929 for p in util.finddirs(f):
935 for p in util.finddirs(f):
930 if p in dirs:
936 if p in dirs:
931 yield f, p
937 yield f, p
932 break
938 break
933
939
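# Illustrative sketch only: a self-contained version of the generator above.
# ``_toy_finddirs`` is a stand-in for util.finddirs; it yields each ancestor
# directory of a slash-separated path, longest first.

def _toy_finddirs(f):
    pos = f.rfind('/')
    while pos != -1:
        yield f[:pos]
        pos = f.rfind('/', 0, pos)

def _toy_filesindirs(manifest, dirs):
    for f in manifest:
        for p in _toy_finddirs(f):
            if p in dirs:
                yield f, p
                break

# list(_toy_filesindirs(['a/b/c.txt', 'd.txt'], {'a/b'}))
#   -> [('a/b/c.txt', 'a/b')]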
934 def checkpathconflicts(repo, wctx, mctx, actions):
940 def checkpathconflicts(repo, wctx, mctx, actions):
935 """
941 """
936 Check if any actions introduce path conflicts in the repository, updating
942 Check if any actions introduce path conflicts in the repository, updating
937 actions to record or handle the path conflict accordingly.
943 actions to record or handle the path conflict accordingly.
938 """
944 """
939 mf = wctx.manifest()
945 mf = wctx.manifest()
940
946
941 # The set of local files that conflict with a remote directory.
947 # The set of local files that conflict with a remote directory.
942 localconflicts = set()
948 localconflicts = set()
943
949
944 # The set of directories that conflict with a remote file, and so may cause
950 # The set of directories that conflict with a remote file, and so may cause
945 # conflicts if they still contain any files after the merge.
951 # conflicts if they still contain any files after the merge.
946 remoteconflicts = set()
952 remoteconflicts = set()
947
953
948 # The set of directories that appear as both a file and a directory in the
954 # The set of directories that appear as both a file and a directory in the
949 # remote manifest. These indicate an invalid remote manifest, which
955 # remote manifest. These indicate an invalid remote manifest, which
950 # can't be updated to cleanly.
956 # can't be updated to cleanly.
951 invalidconflicts = set()
957 invalidconflicts = set()
952
958
953 # The set of directories that contain files that are being created.
959 # The set of directories that contain files that are being created.
954 createdfiledirs = set()
960 createdfiledirs = set()
955
961
956 # The set of files deleted by all the actions.
962 # The set of files deleted by all the actions.
957 deletedfiles = set()
963 deletedfiles = set()
958
964
959 for f, (m, args, msg) in actions.items():
965 for f, (m, args, msg) in actions.items():
960 if m in ('c', 'dc', 'm', 'cm'):
966 if m in ('c', 'dc', 'm', 'cm'):
961 # This action may create a new local file.
967 # This action may create a new local file.
962 createdfiledirs.update(util.finddirs(f))
968 createdfiledirs.update(util.finddirs(f))
963 if mf.hasdir(f):
969 if mf.hasdir(f):
964 # The file aliases a local directory. This might be ok if all
970 # The file aliases a local directory. This might be ok if all
965 # the files in the local directory are being deleted. This
971 # the files in the local directory are being deleted. This
966 # will be checked once we know what all the deleted files are.
972 # will be checked once we know what all the deleted files are.
967 remoteconflicts.add(f)
973 remoteconflicts.add(f)
968 # Track the names of all deleted files.
974 # Track the names of all deleted files.
969 if m == 'r':
975 if m == 'r':
970 deletedfiles.add(f)
976 deletedfiles.add(f)
971 if m == 'm':
977 if m == 'm':
972 f1, f2, fa, move, anc = args
978 f1, f2, fa, move, anc = args
973 if move:
979 if move:
974 deletedfiles.add(f1)
980 deletedfiles.add(f1)
975 if m == 'dm':
981 if m == 'dm':
976 f2, flags = args
982 f2, flags = args
977 deletedfiles.add(f2)
983 deletedfiles.add(f2)
978
984
979 # Check all directories that contain created files for path conflicts.
985 # Check all directories that contain created files for path conflicts.
980 for p in createdfiledirs:
986 for p in createdfiledirs:
981 if p in mf:
987 if p in mf:
982 if p in mctx:
988 if p in mctx:
983 # A file is in a directory which aliases both a local
989 # A file is in a directory which aliases both a local
984 # and a remote file. This is an internal inconsistency
990 # and a remote file. This is an internal inconsistency
985 # within the remote manifest.
991 # within the remote manifest.
986 invalidconflicts.add(p)
992 invalidconflicts.add(p)
987 else:
993 else:
988 # A file is in a directory which aliases a local file.
994 # A file is in a directory which aliases a local file.
989 # We will need to rename the local file.
995 # We will need to rename the local file.
990 localconflicts.add(p)
996 localconflicts.add(p)
991 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
997 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
992 # The file is in a directory which aliases a remote file.
998 # The file is in a directory which aliases a remote file.
993 # This is an internal inconsistency within the remote
999 # This is an internal inconsistency within the remote
994 # manifest.
1000 # manifest.
995 invalidconflicts.add(p)
1001 invalidconflicts.add(p)
996
1002
997 # Rename all local conflicting files that have not been deleted.
1003 # Rename all local conflicting files that have not been deleted.
998 for p in localconflicts:
1004 for p in localconflicts:
999 if p not in deletedfiles:
1005 if p not in deletedfiles:
1000 ctxname = bytes(wctx).rstrip('+')
1006 ctxname = bytes(wctx).rstrip('+')
1001 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1007 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1002 actions[pnew] = ('pr', (p,), "local path conflict")
1008 actions[pnew] = ('pr', (p,), "local path conflict")
1003 actions[p] = ('p', (pnew, 'l'), "path conflict")
1009 actions[p] = ('p', (pnew, 'l'), "path conflict")
1004
1010
1005 if remoteconflicts:
1011 if remoteconflicts:
1006 # Check if all files in the conflicting directories have been removed.
1012 # Check if all files in the conflicting directories have been removed.
1007 ctxname = bytes(mctx).rstrip('+')
1013 ctxname = bytes(mctx).rstrip('+')
1008 for f, p in _filesindirs(repo, mf, remoteconflicts):
1014 for f, p in _filesindirs(repo, mf, remoteconflicts):
1009 if f not in deletedfiles:
1015 if f not in deletedfiles:
1010 m, args, msg = actions[p]
1016 m, args, msg = actions[p]
1011 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1017 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1012 if m in ('dc', 'm'):
1018 if m in ('dc', 'm'):
1013 # Action was merge, just update target.
1019 # Action was merge, just update target.
1014 actions[pnew] = (m, args, msg)
1020 actions[pnew] = (m, args, msg)
1015 else:
1021 else:
1016 # Action was create, change to renamed get action.
1022 # Action was create, change to renamed get action.
1017 fl = args[0]
1023 fl = args[0]
1018 actions[pnew] = ('dg', (p, fl), "remote path conflict")
1024 actions[pnew] = ('dg', (p, fl), "remote path conflict")
1019 actions[p] = ('p', (pnew, 'r'), "path conflict")
1025 actions[p] = ('p', (pnew, 'r'), "path conflict")
1020 remoteconflicts.remove(p)
1026 remoteconflicts.remove(p)
1021 break
1027 break
1022
1028
1023 if invalidconflicts:
1029 if invalidconflicts:
1024 for p in invalidconflicts:
1030 for p in invalidconflicts:
1025 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1031 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1026 raise error.Abort(_("destination manifest contains path conflicts"))
1032 raise error.Abort(_("destination manifest contains path conflicts"))
1027
1033
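# A concrete example of the "local path conflict" case handled above, with a
# made-up renamed path (the real name comes from util.safename): suppose the
# working copy has a *file* named 'dir' while the remote side creates
# 'dir/sub'.  'dir' ends up in localconflicts and the resulting actions look
# roughly like:
#
#     actions['dir~local'] = ('pr', ('dir',), "local path conflict")
#     actions['dir']       = ('p',  ('dir~local', 'l'), "path conflict")
#
# applyupdates() later turns the 'pr' entry into an actual rename in the
# working directory, and the 'p' entry into the user-facing warning plus a
# mergestate entry that 'hg resolve --mark' can clear.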
1028 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1034 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1029 acceptremote, followcopies, forcefulldiff=False):
1035 acceptremote, followcopies, forcefulldiff=False):
1030 """
1036 """
1031 Merge wctx and p2 with ancestor pa and generate merge action list
1037 Merge wctx and p2 with ancestor pa and generate merge action list
1032
1038
1033 branchmerge and force are as passed in to update
1039 branchmerge and force are as passed in to update
1034 matcher = matcher to filter file lists
1040 matcher = matcher to filter file lists
1035 acceptremote = accept the incoming changes without prompting
1041 acceptremote = accept the incoming changes without prompting
1036 """
1042 """
1037 if matcher is not None and matcher.always():
1043 if matcher is not None and matcher.always():
1038 matcher = None
1044 matcher = None
1039
1045
1040 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1046 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1041
1047
1042 # manifests fetched in order are going to be faster, so prime the caches
1048 # manifests fetched in order are going to be faster, so prime the caches
1043 [x.manifest() for x in
1049 [x.manifest() for x in
1044 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1050 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1045
1051
1046 if followcopies:
1052 if followcopies:
1047 ret = copies.mergecopies(repo, wctx, p2, pa)
1053 ret = copies.mergecopies(repo, wctx, p2, pa)
1048 copy, movewithdir, diverge, renamedelete, dirmove = ret
1054 copy, movewithdir, diverge, renamedelete, dirmove = ret
1049
1055
1050 boolbm = pycompat.bytestr(bool(branchmerge))
1056 boolbm = pycompat.bytestr(bool(branchmerge))
1051 boolf = pycompat.bytestr(bool(force))
1057 boolf = pycompat.bytestr(bool(force))
1052 boolm = pycompat.bytestr(bool(matcher))
1058 boolm = pycompat.bytestr(bool(matcher))
1053 repo.ui.note(_("resolving manifests\n"))
1059 repo.ui.note(_("resolving manifests\n"))
1054 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1060 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1055 % (boolbm, boolf, boolm))
1061 % (boolbm, boolf, boolm))
1056 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1062 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1057
1063
1058 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1064 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1059 copied = set(copy.values())
1065 copied = set(copy.values())
1060 copied.update(movewithdir.values())
1066 copied.update(movewithdir.values())
1061
1067
1062 if '.hgsubstate' in m1:
1068 if '.hgsubstate' in m1:
1063 # check whether sub state is modified
1069 # check whether sub state is modified
1064 if any(wctx.sub(s).dirty() for s in wctx.substate):
1070 if any(wctx.sub(s).dirty() for s in wctx.substate):
1065 m1['.hgsubstate'] = modifiednodeid
1071 m1['.hgsubstate'] = modifiednodeid
1066
1072
1067 # Don't use m2-vs-ma optimization if:
1073 # Don't use m2-vs-ma optimization if:
1068 # - ma is the same as m1 or m2, which we're just going to diff again later
1074 # - ma is the same as m1 or m2, which we're just going to diff again later
1069 # - The caller specifically asks for a full diff, which is useful during bid
1075 # - The caller specifically asks for a full diff, which is useful during bid
1070 # merge.
1076 # merge.
1071 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1077 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1072 # Identify which files are relevant to the merge, so we can limit the
1078 # Identify which files are relevant to the merge, so we can limit the
1073 # total m1-vs-m2 diff to just those files. This has significant
1079 # total m1-vs-m2 diff to just those files. This has significant
1074 # performance benefits in large repositories.
1080 # performance benefits in large repositories.
1075 relevantfiles = set(ma.diff(m2).keys())
1081 relevantfiles = set(ma.diff(m2).keys())
1076
1082
1077 # For copied and moved files, we need to add the source file too.
1083 # For copied and moved files, we need to add the source file too.
1078 for copykey, copyvalue in copy.iteritems():
1084 for copykey, copyvalue in copy.iteritems():
1079 if copyvalue in relevantfiles:
1085 if copyvalue in relevantfiles:
1080 relevantfiles.add(copykey)
1086 relevantfiles.add(copykey)
1081 for movedirkey in movewithdir:
1087 for movedirkey in movewithdir:
1082 relevantfiles.add(movedirkey)
1088 relevantfiles.add(movedirkey)
1083 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1089 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1084 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1090 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1085
1091
1086 diff = m1.diff(m2, match=matcher)
1092 diff = m1.diff(m2, match=matcher)
1087
1093
1088 if matcher is None:
1094 if matcher is None:
1089 matcher = matchmod.always('', '')
1095 matcher = matchmod.always('', '')
1090
1096
1091 actions = {}
1097 actions = {}
1092 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1098 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1093 if n1 and n2: # file exists on both local and remote side
1099 if n1 and n2: # file exists on both local and remote side
1094 if f not in ma:
1100 if f not in ma:
1095 fa = copy.get(f, None)
1101 fa = copy.get(f, None)
1096 if fa is not None:
1102 if fa is not None:
1097 actions[f] = ('m', (f, f, fa, False, pa.node()),
1103 actions[f] = ('m', (f, f, fa, False, pa.node()),
1098 "both renamed from " + fa)
1104 "both renamed from " + fa)
1099 else:
1105 else:
1100 actions[f] = ('m', (f, f, None, False, pa.node()),
1106 actions[f] = ('m', (f, f, None, False, pa.node()),
1101 "both created")
1107 "both created")
1102 else:
1108 else:
1103 a = ma[f]
1109 a = ma[f]
1104 fla = ma.flags(f)
1110 fla = ma.flags(f)
1105 nol = 'l' not in fl1 + fl2 + fla
1111 nol = 'l' not in fl1 + fl2 + fla
1106 if n2 == a and fl2 == fla:
1112 if n2 == a and fl2 == fla:
1107 actions[f] = ('k', (), "remote unchanged")
1113 actions[f] = ('k', (), "remote unchanged")
1108 elif n1 == a and fl1 == fla: # local unchanged - use remote
1114 elif n1 == a and fl1 == fla: # local unchanged - use remote
1109 if n1 == n2: # optimization: keep local content
1115 if n1 == n2: # optimization: keep local content
1110 actions[f] = ('e', (fl2,), "update permissions")
1116 actions[f] = ('e', (fl2,), "update permissions")
1111 else:
1117 else:
1112 actions[f] = ('g', (fl2, False), "remote is newer")
1118 actions[f] = ('g', (fl2, False), "remote is newer")
1113 elif nol and n2 == a: # remote only changed 'x'
1119 elif nol and n2 == a: # remote only changed 'x'
1114 actions[f] = ('e', (fl2,), "update permissions")
1120 actions[f] = ('e', (fl2,), "update permissions")
1115 elif nol and n1 == a: # local only changed 'x'
1121 elif nol and n1 == a: # local only changed 'x'
1116 actions[f] = ('g', (fl1, False), "remote is newer")
1122 actions[f] = ('g', (fl1, False), "remote is newer")
1117 else: # both changed something
1123 else: # both changed something
1118 actions[f] = ('m', (f, f, f, False, pa.node()),
1124 actions[f] = ('m', (f, f, f, False, pa.node()),
1119 "versions differ")
1125 "versions differ")
1120 elif n1: # file exists only on local side
1126 elif n1: # file exists only on local side
1121 if f in copied:
1127 if f in copied:
1122 pass # we'll deal with it on m2 side
1128 pass # we'll deal with it on m2 side
1123 elif f in movewithdir: # directory rename, move local
1129 elif f in movewithdir: # directory rename, move local
1124 f2 = movewithdir[f]
1130 f2 = movewithdir[f]
1125 if f2 in m2:
1131 if f2 in m2:
1126 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1132 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1127 "remote directory rename, both created")
1133 "remote directory rename, both created")
1128 else:
1134 else:
1129 actions[f2] = ('dm', (f, fl1),
1135 actions[f2] = ('dm', (f, fl1),
1130 "remote directory rename - move from " + f)
1136 "remote directory rename - move from " + f)
1131 elif f in copy:
1137 elif f in copy:
1132 f2 = copy[f]
1138 f2 = copy[f]
1133 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1139 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1134 "local copied/moved from " + f2)
1140 "local copied/moved from " + f2)
1135 elif f in ma: # clean, a different, no remote
1141 elif f in ma: # clean, a different, no remote
1136 if n1 != ma[f]:
1142 if n1 != ma[f]:
1137 if acceptremote:
1143 if acceptremote:
1138 actions[f] = ('r', None, "remote delete")
1144 actions[f] = ('r', None, "remote delete")
1139 else:
1145 else:
1140 actions[f] = ('cd', (f, None, f, False, pa.node()),
1146 actions[f] = ('cd', (f, None, f, False, pa.node()),
1141 "prompt changed/deleted")
1147 "prompt changed/deleted")
1142 elif n1 == addednodeid:
1148 elif n1 == addednodeid:
1143 # This extra 'a' is added by working copy manifest to mark
1149 # This extra 'a' is added by working copy manifest to mark
1144 # the file as locally added. We should forget it instead of
1150 # the file as locally added. We should forget it instead of
1145 # deleting it.
1151 # deleting it.
1146 actions[f] = ('f', None, "remote deleted")
1152 actions[f] = ('f', None, "remote deleted")
1147 else:
1153 else:
1148 actions[f] = ('r', None, "other deleted")
1154 actions[f] = ('r', None, "other deleted")
1149 elif n2: # file exists only on remote side
1155 elif n2: # file exists only on remote side
1150 if f in copied:
1156 if f in copied:
1151 pass # we'll deal with it on m1 side
1157 pass # we'll deal with it on m1 side
1152 elif f in movewithdir:
1158 elif f in movewithdir:
1153 f2 = movewithdir[f]
1159 f2 = movewithdir[f]
1154 if f2 in m1:
1160 if f2 in m1:
1155 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1161 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1156 "local directory rename, both created")
1162 "local directory rename, both created")
1157 else:
1163 else:
1158 actions[f2] = ('dg', (f, fl2),
1164 actions[f2] = ('dg', (f, fl2),
1159 "local directory rename - get from " + f)
1165 "local directory rename - get from " + f)
1160 elif f in copy:
1166 elif f in copy:
1161 f2 = copy[f]
1167 f2 = copy[f]
1162 if f2 in m2:
1168 if f2 in m2:
1163 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1169 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1164 "remote copied from " + f2)
1170 "remote copied from " + f2)
1165 else:
1171 else:
1166 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1172 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1167 "remote moved from " + f2)
1173 "remote moved from " + f2)
1168 elif f not in ma:
1174 elif f not in ma:
1169 # local unknown, remote created: the logic is described by the
1175 # local unknown, remote created: the logic is described by the
1170 # following table:
1176 # following table:
1171 #
1177 #
1172 # force branchmerge different | action
1178 # force branchmerge different | action
1173 # n * * | create
1179 # n * * | create
1174 # y n * | create
1180 # y n * | create
1175 # y y n | create
1181 # y y n | create
1176 # y y y | merge
1182 # y y y | merge
1177 #
1183 #
1178 # Checking whether the files are different is expensive, so we
1184 # Checking whether the files are different is expensive, so we
1179 # don't do that when we can avoid it.
1185 # don't do that when we can avoid it.
1180 if not force:
1186 if not force:
1181 actions[f] = ('c', (fl2,), "remote created")
1187 actions[f] = ('c', (fl2,), "remote created")
1182 elif not branchmerge:
1188 elif not branchmerge:
1183 actions[f] = ('c', (fl2,), "remote created")
1189 actions[f] = ('c', (fl2,), "remote created")
1184 else:
1190 else:
1185 actions[f] = ('cm', (fl2, pa.node()),
1191 actions[f] = ('cm', (fl2, pa.node()),
1186 "remote created, get or merge")
1192 "remote created, get or merge")
1187 elif n2 != ma[f]:
1193 elif n2 != ma[f]:
1188 df = None
1194 df = None
1189 for d in dirmove:
1195 for d in dirmove:
1190 if f.startswith(d):
1196 if f.startswith(d):
1191 # new file added in a directory that was moved
1197 # new file added in a directory that was moved
1192 df = dirmove[d] + f[len(d):]
1198 df = dirmove[d] + f[len(d):]
1193 break
1199 break
1194 if df is not None and df in m1:
1200 if df is not None and df in m1:
1195 actions[df] = ('m', (df, f, f, False, pa.node()),
1201 actions[df] = ('m', (df, f, f, False, pa.node()),
1196 "local directory rename - respect move from " + f)
1202 "local directory rename - respect move from " + f)
1197 elif acceptremote:
1203 elif acceptremote:
1198 actions[f] = ('c', (fl2,), "remote recreating")
1204 actions[f] = ('c', (fl2,), "remote recreating")
1199 else:
1205 else:
1200 actions[f] = ('dc', (None, f, f, False, pa.node()),
1206 actions[f] = ('dc', (None, f, f, False, pa.node()),
1201 "prompt deleted/changed")
1207 "prompt deleted/changed")
1202
1208
1203 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1209 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1204 # If we are merging, look for path conflicts.
1210 # If we are merging, look for path conflicts.
1205 checkpathconflicts(repo, wctx, p2, actions)
1211 checkpathconflicts(repo, wctx, p2, actions)
1206
1212
1207 return actions, diverge, renamedelete
1213 return actions, diverge, renamedelete
1208
1214
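# Illustrative sketch only: the "local unknown, remote created" table inside
# manifestmerge() collapses to the tiny helper below.  The expensive "are the
# contents different?" check is deliberately deferred: it only matters in the
# force+branchmerge case, which is why that case becomes 'cm' (get or merge)
# instead of being decided here.

def _toy_newfile_action(force, branchmerge):
    if not force or not branchmerge:
        return 'c'   # plain create
    return 'cm'      # remote created, get or merge

# _toy_newfile_action(force=True, branchmerge=False) -> 'c'
# _toy_newfile_action(force=True, branchmerge=True)  -> 'cm'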
1209 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1215 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1210 """Resolves false conflicts where the nodeid changed but the content
1216 """Resolves false conflicts where the nodeid changed but the content
1211 remained the same."""
1217 remained the same."""
1212 # We force a copy of actions.items() because we're going to mutate
1218 # We force a copy of actions.items() because we're going to mutate
1213 # actions as we resolve trivial conflicts.
1219 # actions as we resolve trivial conflicts.
1214 for f, (m, args, msg) in list(actions.items()):
1220 for f, (m, args, msg) in list(actions.items()):
1215 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1221 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1216 # local did change but ended up with same content
1222 # local did change but ended up with same content
1217 actions[f] = 'r', None, "prompt same"
1223 actions[f] = 'r', None, "prompt same"
1218 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1224 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1219 # remote did change but ended up with same content
1225 # remote did change but ended up with same content
1220 del actions[f] # don't get = keep local deleted
1226 del actions[f] # don't get = keep local deleted
1221
1227
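# Illustrative sketch only: the effect of _resolvetrivial() restated with
# plain sets standing in for the cmp() calls above.  ``localsame`` and
# ``remotesame`` are the files whose wctx/mctx content is identical to the
# ancestor's.

def _toy_resolvetrivial(actions, localsame, remotesame):
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in localsame:
            actions[f] = ('r', None, "prompt same")
        elif m == 'dc' and f in remotesame:
            del actions[f]
    return actions

# A 'cd' entry whose local content matches the ancestor becomes a plain
# remove; a 'dc' entry whose remote content matches the ancestor is dropped,
# i.e. the local deletion wins.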
1222 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1228 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1223 acceptremote, followcopies, matcher=None,
1229 acceptremote, followcopies, matcher=None,
1224 mergeforce=False):
1230 mergeforce=False):
1225 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1231 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1226 # Avoid cycle.
1232 # Avoid cycle.
1227 from . import sparse
1233 from . import sparse
1228
1234
1229 if len(ancestors) == 1: # default
1235 if len(ancestors) == 1: # default
1230 actions, diverge, renamedelete = manifestmerge(
1236 actions, diverge, renamedelete = manifestmerge(
1231 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1237 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1232 acceptremote, followcopies)
1238 acceptremote, followcopies)
1233 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1239 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1234
1240
1235 else: # only when merge.preferancestor=* - the default
1241 else: # only when merge.preferancestor=* - the default
1236 repo.ui.note(
1242 repo.ui.note(
1237 _("note: merging %s and %s using bids from ancestors %s\n") %
1243 _("note: merging %s and %s using bids from ancestors %s\n") %
1238 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1244 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1239 for anc in ancestors)))
1245 for anc in ancestors)))
1240
1246
1241 # Call for bids
1247 # Call for bids
1242 fbids = {} # mapping filename to bids (action method to list of actions)
1248 fbids = {} # mapping filename to bids (action method to list of actions)
1243 diverge, renamedelete = None, None
1249 diverge, renamedelete = None, None
1244 for ancestor in ancestors:
1250 for ancestor in ancestors:
1245 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1251 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1246 actions, diverge1, renamedelete1 = manifestmerge(
1252 actions, diverge1, renamedelete1 = manifestmerge(
1247 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1253 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1248 acceptremote, followcopies, forcefulldiff=True)
1254 acceptremote, followcopies, forcefulldiff=True)
1249 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1255 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1250
1256
1251 # Track the shortest set of warnings on the theory that bid
1257 # Track the shortest set of warnings on the theory that bid
1252 # merge will correctly incorporate more information
1258 # merge will correctly incorporate more information
1253 if diverge is None or len(diverge1) < len(diverge):
1259 if diverge is None or len(diverge1) < len(diverge):
1254 diverge = diverge1
1260 diverge = diverge1
1255 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1261 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1256 renamedelete = renamedelete1
1262 renamedelete = renamedelete1
1257
1263
1258 for f, a in sorted(actions.iteritems()):
1264 for f, a in sorted(actions.iteritems()):
1259 m, args, msg = a
1265 m, args, msg = a
1260 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1266 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1261 if f in fbids:
1267 if f in fbids:
1262 d = fbids[f]
1268 d = fbids[f]
1263 if m in d:
1269 if m in d:
1264 d[m].append(a)
1270 d[m].append(a)
1265 else:
1271 else:
1266 d[m] = [a]
1272 d[m] = [a]
1267 else:
1273 else:
1268 fbids[f] = {m: [a]}
1274 fbids[f] = {m: [a]}
1269
1275
1270 # Pick the best bid for each file
1276 # Pick the best bid for each file
1271 repo.ui.note(_('\nauction for merging merge bids\n'))
1277 repo.ui.note(_('\nauction for merging merge bids\n'))
1272 actions = {}
1278 actions = {}
1273 dms = [] # filenames that have dm actions
1279 dms = [] # filenames that have dm actions
1274 for f, bids in sorted(fbids.items()):
1280 for f, bids in sorted(fbids.items()):
1275 # bids is a mapping from action method to list of actions
1281 # bids is a mapping from action method to list of actions
1276 # Consensus?
1282 # Consensus?
1277 if len(bids) == 1: # all bids are the same kind of method
1283 if len(bids) == 1: # all bids are the same kind of method
1278 m, l = list(bids.items())[0]
1284 m, l = list(bids.items())[0]
1279 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1285 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1280 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1286 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1281 actions[f] = l[0]
1287 actions[f] = l[0]
1282 if m == 'dm':
1288 if m == 'dm':
1283 dms.append(f)
1289 dms.append(f)
1284 continue
1290 continue
1285 # If keep is an option, just do it.
1291 # If keep is an option, just do it.
1286 if 'k' in bids:
1292 if 'k' in bids:
1287 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1293 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1288 actions[f] = bids['k'][0]
1294 actions[f] = bids['k'][0]
1289 continue
1295 continue
1290 # If there are gets and they all agree [how could they not?], do it.
1296 # If there are gets and they all agree [how could they not?], do it.
1291 if 'g' in bids:
1297 if 'g' in bids:
1292 ga0 = bids['g'][0]
1298 ga0 = bids['g'][0]
1293 if all(a == ga0 for a in bids['g'][1:]):
1299 if all(a == ga0 for a in bids['g'][1:]):
1294 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1300 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1295 actions[f] = ga0
1301 actions[f] = ga0
1296 continue
1302 continue
1297 # TODO: Consider other simple actions such as mode changes
1303 # TODO: Consider other simple actions such as mode changes
1298 # Handle inefficient democrazy.
1304 # Handle inefficient democrazy.
1299 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1305 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1300 for m, l in sorted(bids.items()):
1306 for m, l in sorted(bids.items()):
1301 for _f, args, msg in l:
1307 for _f, args, msg in l:
1302 repo.ui.note(' %s -> %s\n' % (msg, m))
1308 repo.ui.note(' %s -> %s\n' % (msg, m))
1303 # Pick random action. TODO: Instead, prompt user when resolving
1309 # Pick random action. TODO: Instead, prompt user when resolving
1304 m, l = list(bids.items())[0]
1310 m, l = list(bids.items())[0]
1305 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1311 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1306 (f, m))
1312 (f, m))
1307 actions[f] = l[0]
1313 actions[f] = l[0]
1308 if m == 'dm':
1314 if m == 'dm':
1309 dms.append(f)
1315 dms.append(f)
1310 continue
1316 continue
1311 # Work around 'dm' that can cause multiple actions for the same file
1317 # Work around 'dm' that can cause multiple actions for the same file
1312 for f in dms:
1318 for f in dms:
1313 dm, (f0, flags), msg = actions[f]
1319 dm, (f0, flags), msg = actions[f]
1314 assert dm == 'dm', dm
1320 assert dm == 'dm', dm
1315 if f0 in actions and actions[f0][0] == 'r':
1321 if f0 in actions and actions[f0][0] == 'r':
1316 # We have one bid for removing a file and another for moving it.
1322 # We have one bid for removing a file and another for moving it.
1317 # These two could be merged as first move and then delete ...
1323 # These two could be merged as first move and then delete ...
1318 # but instead drop moving and just delete.
1324 # but instead drop moving and just delete.
1319 del actions[f]
1325 del actions[f]
1320 repo.ui.note(_('end of auction\n\n'))
1326 repo.ui.note(_('end of auction\n\n'))
1321
1327
1322 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1328 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1323
1329
1324 if wctx.rev() is None:
1330 if wctx.rev() is None:
1325 fractions = _forgetremoved(wctx, mctx, branchmerge)
1331 fractions = _forgetremoved(wctx, mctx, branchmerge)
1326 actions.update(fractions)
1332 actions.update(fractions)
1327
1333
1328 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1334 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1329 actions)
1335 actions)
1330
1336
1331 return prunedactions, diverge, renamedelete
1337 return prunedactions, diverge, renamedelete
1332
1338
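# Illustrative sketch only: the per-file preference order used by the
# bid-merge auction above, restated over one file's ``bids`` dict of
# ``{action code: [action tuples]}``.  The real loop also emits notes and
# warnings and tracks 'dm' actions separately.

def _toy_pickbid(bids):
    if len(bids) == 1:                   # consensus on the action type?
        m, l = list(bids.items())[0]
        if all(a == l[0] for a in l[1:]):
            return l[0]
    if 'k' in bids:                      # keeping local is always safe
        return bids['k'][0]
    if 'g' in bids:                      # unanimous 'get' bids are safe too
        ga0 = bids['g'][0]
        if all(a == ga0 for a in bids['g'][1:]):
            return ga0
    return list(bids.values())[0][0]     # ambiguous: arbitrary fallback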
1333 def _getcwd():
1339 def _getcwd():
1334 try:
1340 try:
1335 return pycompat.getcwd()
1341 return pycompat.getcwd()
1336 except OSError as err:
1342 except OSError as err:
1337 if err.errno == errno.ENOENT:
1343 if err.errno == errno.ENOENT:
1338 return None
1344 return None
1339 raise
1345 raise
1340
1346
1341 def batchremove(repo, wctx, actions):
1347 def batchremove(repo, wctx, actions):
1342 """apply removes to the working directory
1348 """apply removes to the working directory
1343
1349
1344 yields tuples for progress updates
1350 yields tuples for progress updates
1345 """
1351 """
1346 verbose = repo.ui.verbose
1352 verbose = repo.ui.verbose
1347 cwd = _getcwd()
1353 cwd = _getcwd()
1348 i = 0
1354 i = 0
1349 for f, args, msg in actions:
1355 for f, args, msg in actions:
1350 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1356 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1351 if verbose:
1357 if verbose:
1352 repo.ui.note(_("removing %s\n") % f)
1358 repo.ui.note(_("removing %s\n") % f)
1353 wctx[f].audit()
1359 wctx[f].audit()
1354 try:
1360 try:
1355 wctx[f].remove(ignoremissing=True)
1361 wctx[f].remove(ignoremissing=True)
1356 except OSError as inst:
1362 except OSError as inst:
1357 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1363 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1358 (f, inst.strerror))
1364 (f, inst.strerror))
1359 if i == 100:
1365 if i == 100:
1360 yield i, f
1366 yield i, f
1361 i = 0
1367 i = 0
1362 i += 1
1368 i += 1
1363 if i > 0:
1369 if i > 0:
1364 yield i, f
1370 yield i, f
1365
1371
1366 if cwd and not _getcwd():
1372 if cwd and not _getcwd():
1367 # cwd was removed in the course of removing files; print a helpful
1373 # cwd was removed in the course of removing files; print a helpful
1368 # warning.
1374 # warning.
1369 repo.ui.warn(_("current directory was removed\n"
1375 repo.ui.warn(_("current directory was removed\n"
1370 "(consider changing to repo root: %s)\n") % repo.root)
1376 "(consider changing to repo root: %s)\n") % repo.root)
1371
1377
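# Illustrative sketch only: batchremove() above and batchget() below report
# progress by yielding ``(count, last file)`` pairs roughly every 100 files;
# worker.worker() consumes generators of exactly this shape.  The counting
# pattern on its own:

def _toy_batchprogress(files):
    i = 0
    f = None
    for f in files:
        # ... per-file work happens here ...
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

# sum(n for n, _f in _toy_batchprogress(['f%d' % k for k in range(250)]))
#   -> 250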
1372 def batchget(repo, mctx, wctx, actions):
1378 def batchget(repo, mctx, wctx, actions):
1373 """apply gets to the working directory
1379 """apply gets to the working directory
1374
1380
1375 mctx is the context to get from
1381 mctx is the context to get from
1376
1382
1377 yields tuples for progress updates
1383 yields tuples for progress updates
1378 """
1384 """
1379 verbose = repo.ui.verbose
1385 verbose = repo.ui.verbose
1380 fctx = mctx.filectx
1386 fctx = mctx.filectx
1381 ui = repo.ui
1387 ui = repo.ui
1382 i = 0
1388 i = 0
1383 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1389 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1384 for f, (flags, backup), msg in actions:
1390 for f, (flags, backup), msg in actions:
1385 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1391 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1386 if verbose:
1392 if verbose:
1387 repo.ui.note(_("getting %s\n") % f)
1393 repo.ui.note(_("getting %s\n") % f)
1388
1394
1389 if backup:
1395 if backup:
1390 # If a file or directory exists with the same name, back that
1396 # If a file or directory exists with the same name, back that
1391 # up. Otherwise, look to see if there is a file that conflicts
1397 # up. Otherwise, look to see if there is a file that conflicts
1392 # with a directory this file is in, and if so, back that up.
1398 # with a directory this file is in, and if so, back that up.
1393 absf = repo.wjoin(f)
1399 absf = repo.wjoin(f)
1394 if not repo.wvfs.lexists(f):
1400 if not repo.wvfs.lexists(f):
1395 for p in util.finddirs(f):
1401 for p in util.finddirs(f):
1396 if repo.wvfs.isfileorlink(p):
1402 if repo.wvfs.isfileorlink(p):
1397 absf = repo.wjoin(p)
1403 absf = repo.wjoin(p)
1398 break
1404 break
1399 orig = scmutil.origpath(ui, repo, absf)
1405 orig = scmutil.origpath(ui, repo, absf)
1400 if repo.wvfs.lexists(absf):
1406 if repo.wvfs.lexists(absf):
1401 util.rename(absf, orig)
1407 util.rename(absf, orig)
1402 wctx[f].clearunknown()
1408 wctx[f].clearunknown()
1403 atomictemp = ui.configbool("experimental", "update.atomic-file")
1409 atomictemp = ui.configbool("experimental", "update.atomic-file")
1404 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1410 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1405 atomictemp=atomictemp)
1411 atomictemp=atomictemp)
1406 if i == 100:
1412 if i == 100:
1407 yield i, f
1413 yield i, f
1408 i = 0
1414 i = 0
1409 i += 1
1415 i += 1
1410 if i > 0:
1416 if i > 0:
1411 yield i, f
1417 yield i, f
1412
1418
1413 def _prefetchfiles(repo, ctx, actions):
1419 def _prefetchfiles(repo, ctx, actions):
1414 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1420 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1415 of merge actions. ``ctx`` is the context being merged in."""
1421 of merge actions. ``ctx`` is the context being merged in."""
1416
1422
1417 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1423 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1418 # don't touch the context to be merged in. 'cd' is skipped, because
1424 # don't touch the context to be merged in. 'cd' is skipped, because
1419 # changed/deleted never resolves to something from the remote side.
1425 # changed/deleted never resolves to something from the remote side.
1420 oplist = [actions[a] for a in 'g dc dg m'.split()]
1426 oplist = [actions[a] for a in 'g dc dg m'.split()]
1421 prefetch = scmutil.fileprefetchhooks
1427 prefetch = scmutil.fileprefetchhooks
1422 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1428 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1423
1429
1424 @attr.s(frozen=True)
1430 @attr.s(frozen=True)
1425 class updateresult(object):
1431 class updateresult(object):
1426 updatedcount = attr.ib()
1432 updatedcount = attr.ib()
1427 mergedcount = attr.ib()
1433 mergedcount = attr.ib()
1428 removedcount = attr.ib()
1434 removedcount = attr.ib()
1429 unresolvedcount = attr.ib()
1435 unresolvedcount = attr.ib()
1430
1436
1431 # TODO remove container emulation once consumers switch to new API.
1437 # TODO remove container emulation once consumers switch to new API.
1432
1438
1433 def __getitem__(self, x):
1439 def __getitem__(self, x):
1434 if x == 0:
1440 if x == 0:
1435 return self.updatedcount
1441 return self.updatedcount
1436 elif x == 1:
1442 elif x == 1:
1437 return self.mergedcount
1443 return self.mergedcount
1438 elif x == 2:
1444 elif x == 2:
1439 return self.removedcount
1445 return self.removedcount
1440 elif x == 3:
1446 elif x == 3:
1441 return self.unresolvedcount
1447 return self.unresolvedcount
1442 else:
1448 else:
1443 raise IndexError('can only access items 0-3')
1449 raise IndexError('can only access items 0-3')
1444
1450
1445 def __len__(self):
1451 def __len__(self):
1446 return 4
1452 return 4
1447
1453
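# updateresult supports both the new attribute API and, for now, the legacy
# 4-tuple protocol.  A hedged usage sketch (assumes this module is importable
# as ``mercurial.merge``; the numbers are made up):
#
#     from mercurial import merge as mergemod
#     res = mergemod.updateresult(3, 1, 2, 0)
#     res.unresolvedcount                           # 0, preferred access
#     updated, merged, removed, unresolved = res    # legacy tuple unpacking
#     len(res)                                      # 4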
1448 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1454 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1449 """apply the merge action list to the working directory
1455 """apply the merge action list to the working directory
1450
1456
1451 wctx is the working copy context
1457 wctx is the working copy context
1452 mctx is the context to be merged into the working copy
1458 mctx is the context to be merged into the working copy
1453
1459
1454 Return a tuple of counts (updated, merged, removed, unresolved) that
1460 Return a tuple of counts (updated, merged, removed, unresolved) that
1455 describes how many files were affected by the update.
1461 describes how many files were affected by the update.
1456 """
1462 """
1457
1463
1458 _prefetchfiles(repo, mctx, actions)
1464 _prefetchfiles(repo, mctx, actions)
1459
1465
1460 updated, merged, removed = 0, 0, 0
1466 updated, merged, removed = 0, 0, 0
1461 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1467 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1462 moves = []
1468 moves = []
1463 for m, l in actions.items():
1469 for m, l in actions.items():
1464 l.sort()
1470 l.sort()
1465
1471
1466 # 'cd' and 'dc' actions are treated like other merge conflicts
1472 # 'cd' and 'dc' actions are treated like other merge conflicts
1467 mergeactions = sorted(actions['cd'])
1473 mergeactions = sorted(actions['cd'])
1468 mergeactions.extend(sorted(actions['dc']))
1474 mergeactions.extend(sorted(actions['dc']))
1469 mergeactions.extend(actions['m'])
1475 mergeactions.extend(actions['m'])
1470 for f, args, msg in mergeactions:
1476 for f, args, msg in mergeactions:
1471 f1, f2, fa, move, anc = args
1477 f1, f2, fa, move, anc = args
1472 if f == '.hgsubstate': # merged internally
1478 if f == '.hgsubstate': # merged internally
1473 continue
1479 continue
1474 if f1 is None:
1480 if f1 is None:
1475 fcl = filemerge.absentfilectx(wctx, fa)
1481 fcl = filemerge.absentfilectx(wctx, fa)
1476 else:
1482 else:
1477 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1483 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1478 fcl = wctx[f1]
1484 fcl = wctx[f1]
1479 if f2 is None:
1485 if f2 is None:
1480 fco = filemerge.absentfilectx(mctx, fa)
1486 fco = filemerge.absentfilectx(mctx, fa)
1481 else:
1487 else:
1482 fco = mctx[f2]
1488 fco = mctx[f2]
1483 actx = repo[anc]
1489 actx = repo[anc]
1484 if fa in actx:
1490 if fa in actx:
1485 fca = actx[fa]
1491 fca = actx[fa]
1486 else:
1492 else:
1487 # TODO: move to absentfilectx
1493 # TODO: move to absentfilectx
1488 fca = repo.filectx(f1, fileid=nullrev)
1494 fca = repo.filectx(f1, fileid=nullrev)
1489 ms.add(fcl, fco, fca, f)
1495 ms.add(fcl, fco, fca, f)
1490 if f1 != f and move:
1496 if f1 != f and move:
1491 moves.append(f1)
1497 moves.append(f1)
1492
1498
1493 _updating = _('updating')
1499 _updating = _('updating')
1494 _files = _('files')
1500 _files = _('files')
1495 progress = repo.ui.progress
1501 progress = repo.ui.progress
1496
1502
1497 # remove renamed files after safely stored
1503 # remove renamed files after safely stored
1498 for f in moves:
1504 for f in moves:
1499 if wctx[f].lexists():
1505 if wctx[f].lexists():
1500 repo.ui.debug("removing %s\n" % f)
1506 repo.ui.debug("removing %s\n" % f)
1501 wctx[f].audit()
1507 wctx[f].audit()
1502 wctx[f].remove()
1508 wctx[f].remove()
1503
1509
1504 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1510 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1505 z = 0
1511 z = 0
1506
1512
1507 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1513 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1508 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1514 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1509
1515
1510 # record path conflicts
1516 # record path conflicts
1511 for f, args, msg in actions['p']:
1517 for f, args, msg in actions['p']:
1512 f1, fo = args
1518 f1, fo = args
1513 s = repo.ui.status
1519 s = repo.ui.status
1514 s(_("%s: path conflict - a file or link has the same name as a "
1520 s(_("%s: path conflict - a file or link has the same name as a "
1515 "directory\n") % f)
1521 "directory\n") % f)
1516 if fo == 'l':
1522 if fo == 'l':
1517 s(_("the local file has been renamed to %s\n") % f1)
1523 s(_("the local file has been renamed to %s\n") % f1)
1518 else:
1524 else:
1519 s(_("the remote file has been renamed to %s\n") % f1)
1525 s(_("the remote file has been renamed to %s\n") % f1)
1520 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1526 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1521 ms.addpath(f, f1, fo)
1527 ms.addpath(f, f1, fo)
1522 z += 1
1528 z += 1
1523 progress(_updating, z, item=f, total=numupdates, unit=_files)
1529 progress(_updating, z, item=f, total=numupdates, unit=_files)
1524
1530
1525 # When merging in-memory, we can't support worker processes, so set the
1531 # When merging in-memory, we can't support worker processes, so set the
1526 # per-item cost at 0 in that case.
1532 # per-item cost at 0 in that case.
1527 cost = 0 if wctx.isinmemory() else 0.001
1533 cost = 0 if wctx.isinmemory() else 0.001
1528
1534
1529 # remove in parallel (must come before resolving path conflicts and getting)
1535 # remove in parallel (must come before resolving path conflicts and getting)
1530 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1536 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1531 actions['r'])
1537 actions['r'])
1532 for i, item in prog:
1538 for i, item in prog:
1533 z += i
1539 z += i
1534 progress(_updating, z, item=item, total=numupdates, unit=_files)
1540 progress(_updating, z, item=item, total=numupdates, unit=_files)
1535 removed = len(actions['r'])
1541 removed = len(actions['r'])
1536
1542
1537 # resolve path conflicts (must come before getting)
1543 # resolve path conflicts (must come before getting)
1538 for f, args, msg in actions['pr']:
1544 for f, args, msg in actions['pr']:
1539 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1545 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1540 f0, = args
1546 f0, = args
1541 if wctx[f0].lexists():
1547 if wctx[f0].lexists():
1542 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1548 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1543 wctx[f].audit()
1549 wctx[f].audit()
1544 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1550 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1545 wctx[f0].remove()
1551 wctx[f0].remove()
1546 z += 1
1552 z += 1
1547 progress(_updating, z, item=f, total=numupdates, unit=_files)
1553 progress(_updating, z, item=f, total=numupdates, unit=_files)
1548
1554
1549 # get in parallel
1555 # get in parallel
1550 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1556 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1551 actions['g'])
1557 actions['g'])
1552 for i, item in prog:
1558 for i, item in prog:
1553 z += i
1559 z += i
1554 progress(_updating, z, item=item, total=numupdates, unit=_files)
1560 progress(_updating, z, item=item, total=numupdates, unit=_files)
1555 updated = len(actions['g'])
1561 updated = len(actions['g'])
1556
1562
1557 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1563 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1558 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1564 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1559
1565
1560 # forget (manifest only, just log it) (must come first)
1566 # forget (manifest only, just log it) (must come first)
1561 for f, args, msg in actions['f']:
1567 for f, args, msg in actions['f']:
1562 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1568 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1563 z += 1
1569 z += 1
1564 progress(_updating, z, item=f, total=numupdates, unit=_files)
1570 progress(_updating, z, item=f, total=numupdates, unit=_files)
1565
1571
1566 # re-add (manifest only, just log it)
1572 # re-add (manifest only, just log it)
1567 for f, args, msg in actions['a']:
1573 for f, args, msg in actions['a']:
1568 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1574 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1569 z += 1
1575 z += 1
1570 progress(_updating, z, item=f, total=numupdates, unit=_files)
1576 progress(_updating, z, item=f, total=numupdates, unit=_files)
1571
1577
1572 # re-add/mark as modified (manifest only, just log it)
1578 # re-add/mark as modified (manifest only, just log it)
1573 for f, args, msg in actions['am']:
1579 for f, args, msg in actions['am']:
1574 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1580 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1575 z += 1
1581 z += 1
1576 progress(_updating, z, item=f, total=numupdates, unit=_files)
1582 progress(_updating, z, item=f, total=numupdates, unit=_files)
1577
1583
1578 # keep (noop, just log it)
1584 # keep (noop, just log it)
1579 for f, args, msg in actions['k']:
1585 for f, args, msg in actions['k']:
1580 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1586 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1581 # no progress
1587 # no progress
1582
1588
1583 # directory rename, move local
1589 # directory rename, move local
1584 for f, args, msg in actions['dm']:
1590 for f, args, msg in actions['dm']:
1585 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1591 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1586 z += 1
1592 z += 1
1587 progress(_updating, z, item=f, total=numupdates, unit=_files)
1593 progress(_updating, z, item=f, total=numupdates, unit=_files)
1588 f0, flags = args
1594 f0, flags = args
1589 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1595 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1590 wctx[f].audit()
1596 wctx[f].audit()
1591 wctx[f].write(wctx.filectx(f0).data(), flags)
1597 wctx[f].write(wctx.filectx(f0).data(), flags)
1592 wctx[f0].remove()
1598 wctx[f0].remove()
1593 updated += 1
1599 updated += 1
1594
1600
1595 # local directory rename, get
1601 # local directory rename, get
1596 for f, args, msg in actions['dg']:
1602 for f, args, msg in actions['dg']:
1597 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1603 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1598 z += 1
1604 z += 1
1599 progress(_updating, z, item=f, total=numupdates, unit=_files)
1605 progress(_updating, z, item=f, total=numupdates, unit=_files)
1600 f0, flags = args
1606 f0, flags = args
1601 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1607 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1602 wctx[f].write(mctx.filectx(f0).data(), flags)
1608 wctx[f].write(mctx.filectx(f0).data(), flags)
1603 updated += 1
1609 updated += 1
1604
1610
1605 # exec
1611 # exec
1606 for f, args, msg in actions['e']:
1612 for f, args, msg in actions['e']:
1607 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1613 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1608 z += 1
1614 z += 1
1609 progress(_updating, z, item=f, total=numupdates, unit=_files)
1615 progress(_updating, z, item=f, total=numupdates, unit=_files)
1610 flags, = args
1616 flags, = args
1611 wctx[f].audit()
1617 wctx[f].audit()
1612 wctx[f].setflags('l' in flags, 'x' in flags)
1618 wctx[f].setflags('l' in flags, 'x' in flags)
1613 updated += 1
1619 updated += 1
1614
1620
1615 # the ordering is important here -- ms.mergedriver will raise if the merge
1621 # the ordering is important here -- ms.mergedriver will raise if the merge
1616 # driver has changed, and we want to be able to bypass it when overwrite is
1622 # driver has changed, and we want to be able to bypass it when overwrite is
1617 # True
1623 # True
1618 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1624 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1619
1625
1620 if usemergedriver:
1626 if usemergedriver:
1621 if wctx.isinmemory():
1627 if wctx.isinmemory():
1622 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1628 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1623 "support mergedriver")
1629 "support mergedriver")
1624 ms.commit()
1630 ms.commit()
1625 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1631 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1626 # the driver might leave some files unresolved
1632 # the driver might leave some files unresolved
1627 unresolvedf = set(ms.unresolved())
1633 unresolvedf = set(ms.unresolved())
1628 if not proceed:
1634 if not proceed:
1629 # XXX setting unresolved to at least 1 is a hack to make sure we
1635 # XXX setting unresolved to at least 1 is a hack to make sure we
1630 # error out
1636 # error out
1631 return updateresult(updated, merged, removed,
1637 return updateresult(updated, merged, removed,
1632 max(len(unresolvedf), 1))
1638 max(len(unresolvedf), 1))
1633 newactions = []
1639 newactions = []
1634 for f, args, msg in mergeactions:
1640 for f, args, msg in mergeactions:
1635 if f in unresolvedf:
1641 if f in unresolvedf:
1636 newactions.append((f, args, msg))
1642 newactions.append((f, args, msg))
1637 mergeactions = newactions
1643 mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            if f == '.hgsubstate': # subrepo states need updating
                subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                     overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (usemergedriver and not unresolved
            and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)
    return updateresult(updated, merged, removed, unresolved)
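# Editor's note (illustrative sketch, not part of the original file): with the
# constants introduced by this change, callers can test the recorded merge
# driver state symbolically instead of comparing against the raw byte 's':
#
#     if ms.mdstate() == MERGE_DRIVER_STATE_SUCCESS:
#         pass  # the driver already ran to completion; nothing to conclude
#
# The names used here are the module-level MERGE_DRIVER_STATE_* constants
# referenced in the conclude gate above.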

def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
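    # Editor's note (summary, not in the original source): the short action
    # codes consumed above are r=remove, f=forget, pr=path conflict resolve,
    # a=re-add, am=re-add/mark as modified, e=exec change, k=keep, g=get,
    # m=merge, dm=directory rename (move local), dg=directory rename (get);
    # each simply maps onto the corresponding dirstate call.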
1803
1810
1804 def update(repo, node, branchmerge, force, ancestor=None,
1811 def update(repo, node, branchmerge, force, ancestor=None,
1805 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1812 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1806 updatecheck=None, wc=None):
1813 updatecheck=None, wc=None):
1807 """
1814 """
1808 Perform a merge between the working directory and the given node
1815 Perform a merge between the working directory and the given node
1809
1816
1810 node = the node to update to
1817 node = the node to update to
1811 branchmerge = whether to merge between branches
1818 branchmerge = whether to merge between branches
1812 force = whether to force branch merging or file overwriting
1819 force = whether to force branch merging or file overwriting
1813 matcher = a matcher to filter file lists (dirstate not updated)
1820 matcher = a matcher to filter file lists (dirstate not updated)
1814 mergeancestor = whether it is merging with an ancestor. If true,
1821 mergeancestor = whether it is merging with an ancestor. If true,
1815 we should accept the incoming changes for any prompts that occur.
1822 we should accept the incoming changes for any prompts that occur.
1816 If false, merging with an ancestor (fast-forward) is only allowed
1823 If false, merging with an ancestor (fast-forward) is only allowed
1817 between different named branches. This flag is used by rebase extension
1824 between different named branches. This flag is used by rebase extension
1818 as a temporary fix and should be avoided in general.
1825 as a temporary fix and should be avoided in general.
1819 labels = labels to use for base, local and other
1826 labels = labels to use for base, local and other
1820 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1827 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1821 this is True, then 'force' should be True as well.
1828 this is True, then 'force' should be True as well.
1822
1829
1823 The table below shows all the behaviors of the update command given the
1830 The table below shows all the behaviors of the update command given the
1824 -c/--check and -C/--clean or no options, whether the working directory is
1831 -c/--check and -C/--clean or no options, whether the working directory is
1825 dirty, whether a revision is specified, and the relationship of the parent
1832 dirty, whether a revision is specified, and the relationship of the parent
1826 rev to the target rev (linear or not). Match from top first. The -n
1833 rev to the target rev (linear or not). Match from top first. The -n
1827 option doesn't exist on the command line, but represents the
1834 option doesn't exist on the command line, but represents the
1828 experimental.updatecheck=noconflict option.
1835 experimental.updatecheck=noconflict option.
1829
1836
1830 This logic is tested by test-update-branches.t.
1837 This logic is tested by test-update-branches.t.
1831
1838
1832 -c -C -n -m dirty rev linear | result
1839 -c -C -n -m dirty rev linear | result
1833 y y * * * * * | (1)
1840 y y * * * * * | (1)
1834 y * y * * * * | (1)
1841 y * y * * * * | (1)
1835 y * * y * * * | (1)
1842 y * * y * * * | (1)
1836 * y y * * * * | (1)
1843 * y y * * * * | (1)
1837 * y * y * * * | (1)
1844 * y * y * * * | (1)
1838 * * y y * * * | (1)
1845 * * y y * * * | (1)
1839 * * * * * n n | x
1846 * * * * * n n | x
1840 * * * * n * * | ok
1847 * * * * n * * | ok
1841 n n n n y * y | merge
1848 n n n n y * y | merge
1842 n n n n y y n | (2)
1849 n n n n y y n | (2)
1843 n n n y y * * | merge
1850 n n n y y * * | merge
1844 n n y n y * * | merge if no conflict
1851 n n y n y * * | merge if no conflict
1845 n y n n y * * | discard
1852 n y n n y * * | discard
1846 y n n n y * * | (3)
1853 y n n n y * * | (3)
1847
1854
1848 x = can't happen
1855 x = can't happen
1849 * = don't-care
1856 * = don't-care
1850 1 = incompatible options (checked in commands.py)
1857 1 = incompatible options (checked in commands.py)
1851 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1858 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1852 3 = abort: uncommitted changes (checked in commands.py)
1859 3 = abort: uncommitted changes (checked in commands.py)
1853
1860
1854 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1861 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1855 to repo[None] if None is passed.
1862 to repo[None] if None is passed.
1856
1863
1857 Return the same tuple as applyupdates().
1864 Return the same tuple as applyupdates().
1858 """
1865 """
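    # Editor's note (hypothetical usage sketch, not in the original source):
    # a plain working-copy update to `node` would be invoked roughly as
    #
    #     stats = update(repo, node, branchmerge=False, force=False)
    #
    # while a branch merge passes branchmerge=True (see graft() below for a
    # real caller); the returned value carries the updated/merged/removed/
    # unresolved counts described above.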
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return updateresult(0, 0, 0, 0)

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # The branching here is a bit strange, to keep the number
                    # of calls to obsutil.foreground to a minimum.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))
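        # Editor's note (illustrative, not in the original source): after this
        # conversion, `actions` maps each short action code to a list of
        # (filename, args, message) tuples, roughly of the shape
        #
        #     {'g': [('.hgsubstate', (flags, False), "prompt recreating")],
        #      'm': [...], 'r': [...], ...}
        #
        # which is the form applyupdates() and recordupdates() consume.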

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial and not wc.isinmemory():
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only warn on Linux and macOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))
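        # Editor's note (illustrative, not in the original source): the two
        # config knobs read above can be tuned from an hgrc, for example
        #
        #     [fsmonitor]
        #     warn_when_unused = false
        #     warn_update_file_count = 100000
        #
        # to silence the advisory or to raise the file-count threshold; the
        # values shown are only examples.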

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial and not wc.isinmemory():
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
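# Editor's note (hypothetical usage sketch, not in the original source): a
# caller grafting changeset `ctx` onto the working directory parent would
# invoke this roughly as
#
#     stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])
#
# using ctx.p1() as the merge base and the labels suggested in the docstring;
# the actual graft command wires in its own contexts and labels.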