merge: use constants for actions...
Gregory Szorc
r37130:43ffd907 default
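The change replaces the bare single-letter action codes used throughout merge.py with module-level constants that carry the same byte values. A minimal sketch of the substitution, with names and values taken from the diff below:

    # before: opaque one-letter codes scattered through the code
    actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
    actions[f] = ('g', (fl2, False), "remote created")

    # after: the same byte values behind descriptive names
    ACTION_REMOVE = b'r'
    ACTION_FORGET = b'f'
    ACTION_ADD = b'a'
    ACTION_ADD_MODIFIED = b'am'
    ACTION_GET = b'g'
    actions = {ACTION_REMOVE: [], ACTION_FORGET: [], ACTION_ADD: [],
               ACTION_ADD_MODIFIED: [], ACTION_GET: []}
    actions[f] = (ACTION_GET, (fl2, False), 'remote created')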
@@ -1,2149 +1,2207 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .thirdparty import (
25 from .thirdparty import (
26 attr,
26 attr,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 subrepoutil,
36 subrepoutil,
37 util,
37 util,
38 worker,
38 worker,
39 )
39 )
40
40
41 _pack = struct.pack
41 _pack = struct.pack
42 _unpack = struct.unpack
42 _unpack = struct.unpack
43
43
44 def _droponode(data):
44 def _droponode(data):
45 # used for compatibility for v1
45 # used for compatibility for v1
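# illustrative example (comment added here, not in the original source): the
# join below drops the second-to-last '\0'-separated field -- the "other file
# node" that the v1 format does not store -- e.g.
#   'f\0u\0hash\0lf\0af\0anode\0of\0onode\0x' -> 'f\0u\0hash\0lf\0af\0anode\0of\0x'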
46 bits = data.split('\0')
46 bits = data.split('\0')
47 bits = bits[:-2] + bits[-1:]
47 bits = bits[:-2] + bits[-1:]
48 return '\0'.join(bits)
48 return '\0'.join(bits)
49
49
50 # Merge state record types. See ``mergestate`` docs for more.
50 # Merge state record types. See ``mergestate`` docs for more.
51 RECORD_LOCAL = b'L'
51 RECORD_LOCAL = b'L'
52 RECORD_OTHER = b'O'
52 RECORD_OTHER = b'O'
53 RECORD_MERGED = b'F'
53 RECORD_MERGED = b'F'
54 RECORD_CHANGEDELETE_CONFLICT = b'C'
54 RECORD_CHANGEDELETE_CONFLICT = b'C'
55 RECORD_MERGE_DRIVER_MERGE = b'D'
55 RECORD_MERGE_DRIVER_MERGE = b'D'
56 RECORD_PATH_CONFLICT = b'P'
56 RECORD_PATH_CONFLICT = b'P'
57 RECORD_MERGE_DRIVER_STATE = b'm'
57 RECORD_MERGE_DRIVER_STATE = b'm'
58 RECORD_FILE_VALUES = b'f'
58 RECORD_FILE_VALUES = b'f'
59 RECORD_LABELS = b'l'
59 RECORD_LABELS = b'l'
60 RECORD_OVERRIDE = b't'
60 RECORD_OVERRIDE = b't'
61 RECORD_UNSUPPORTED_MANDATORY = b'X'
61 RECORD_UNSUPPORTED_MANDATORY = b'X'
62 RECORD_UNSUPPORTED_ADVISORY = b'x'
62 RECORD_UNSUPPORTED_ADVISORY = b'x'
63
63
64 MERGE_DRIVER_STATE_UNMARKED = b'u'
64 MERGE_DRIVER_STATE_UNMARKED = b'u'
65 MERGE_DRIVER_STATE_MARKED = b'm'
65 MERGE_DRIVER_STATE_MARKED = b'm'
66 MERGE_DRIVER_STATE_SUCCESS = b's'
66 MERGE_DRIVER_STATE_SUCCESS = b's'
67
67
68 MERGE_RECORD_UNRESOLVED = b'u'
68 MERGE_RECORD_UNRESOLVED = b'u'
69 MERGE_RECORD_RESOLVED = b'r'
69 MERGE_RECORD_RESOLVED = b'r'
70 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
70 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
71 MERGE_RECORD_RESOLVED_PATH = b'pr'
71 MERGE_RECORD_RESOLVED_PATH = b'pr'
72 MERGE_RECORD_DRIVER_RESOLVED = b'd'
72 MERGE_RECORD_DRIVER_RESOLVED = b'd'
73
73
74 ACTION_FORGET = b'f'
75 ACTION_REMOVE = b'r'
76 ACTION_ADD = b'a'
77 ACTION_GET = b'g'
78 ACTION_PATH_CONFLICT = b'p'
79 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
80 ACTION_ADD_MODIFIED = b'am'
81 ACTION_CREATED = b'c'
82 ACTION_DELETED_CHANGED = b'dc'
83 ACTION_CHANGED_DELETED = b'cd'
84 ACTION_MERGE = b'm'
85 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
86 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
87 ACTION_KEEP = b'k'
88 ACTION_EXEC = b'e'
89 ACTION_CREATED_MERGE = b'cm'
90
74 class mergestate(object):
91 class mergestate(object):
75 '''track 3-way merge state of individual files
92 '''track 3-way merge state of individual files
76
93
77 The merge state is stored on disk when needed. Two files are used: one with
94 The merge state is stored on disk when needed. Two files are used: one with
78 an old format (version 1), and one with a new format (version 2). Version 2
95 an old format (version 1), and one with a new format (version 2). Version 2
79 stores a superset of the data in version 1, including new kinds of records
96 stores a superset of the data in version 1, including new kinds of records
80 in the future. For more about the new format, see the documentation for
97 in the future. For more about the new format, see the documentation for
81 `_readrecordsv2`.
98 `_readrecordsv2`.
82
99
83 Each record can contain arbitrary content, and has an associated type. This
100 Each record can contain arbitrary content, and has an associated type. This
84 `type` should be a letter. If `type` is uppercase, the record is mandatory:
101 `type` should be a letter. If `type` is uppercase, the record is mandatory:
85 versions of Mercurial that don't support it should abort. If `type` is
102 versions of Mercurial that don't support it should abort. If `type` is
86 lowercase, the record can be safely ignored.
103 lowercase, the record can be safely ignored.
87
104
88 Currently known records:
105 Currently known records:
89
106
90 L: the node of the "local" part of the merge (hexified version)
107 L: the node of the "local" part of the merge (hexified version)
91 O: the node of the "other" part of the merge (hexified version)
108 O: the node of the "other" part of the merge (hexified version)
92 F: an entry for a file to be merged
109 F: an entry for a file to be merged
93 C: a change/delete or delete/change conflict
110 C: a change/delete or delete/change conflict
94 D: a file that the external merge driver will merge internally
111 D: a file that the external merge driver will merge internally
95 (experimental)
112 (experimental)
96 P: a path conflict (file vs directory)
113 P: a path conflict (file vs directory)
97 m: the external merge driver defined for this merge plus its run state
114 m: the external merge driver defined for this merge plus its run state
98 (experimental)
115 (experimental)
99 f: a (filename, dictionary) tuple of optional values for a given file
116 f: a (filename, dictionary) tuple of optional values for a given file
100 X: unsupported mandatory record type (used in tests)
117 X: unsupported mandatory record type (used in tests)
101 x: unsupported advisory record type (used in tests)
118 x: unsupported advisory record type (used in tests)
102 l: the labels for the parts of the merge.
119 l: the labels for the parts of the merge.
103
120
104 Merge driver run states (experimental):
121 Merge driver run states (experimental):
105 u: driver-resolved files unmarked -- needs to be run next time we're about
122 u: driver-resolved files unmarked -- needs to be run next time we're about
106 to resolve or commit
123 to resolve or commit
107 m: driver-resolved files marked -- only needs to be run before commit
124 m: driver-resolved files marked -- only needs to be run before commit
108 s: success/skipped -- does not need to be run any more
125 s: success/skipped -- does not need to be run any more
109
126
110 Merge record states (stored in self._state, indexed by filename):
127 Merge record states (stored in self._state, indexed by filename):
111 u: unresolved conflict
128 u: unresolved conflict
112 r: resolved conflict
129 r: resolved conflict
113 pu: unresolved path conflict (file conflicts with directory)
130 pu: unresolved path conflict (file conflicts with directory)
114 pr: resolved path conflict
131 pr: resolved path conflict
115 d: driver-resolved conflict
132 d: driver-resolved conflict
116
133
117 The resolve command transitions between 'u' and 'r' for conflicts and
134 The resolve command transitions between 'u' and 'r' for conflicts and
118 'pu' and 'pr' for path conflicts.
135 'pu' and 'pr' for path conflicts.
119 '''
136 '''
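# illustrative only (comment added here, not in the original source): a paused
# merge of a single conflicted file round-trips through records roughly like
#   (RECORD_LOCAL,  hex(local changeset node))
#   (RECORD_OTHER,  hex(other changeset node))
#   (RECORD_MERGED, '\0'.join([filename, state, hash, localpath,
#                              ancestorpath, ancestornode, otherpath,
#                              othernode, flags]))
# see add() and _makerecords() below for the exact field layout.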
120 statepathv1 = 'merge/state'
137 statepathv1 = 'merge/state'
121 statepathv2 = 'merge/state2'
138 statepathv2 = 'merge/state2'
122
139
123 @staticmethod
140 @staticmethod
124 def clean(repo, node=None, other=None, labels=None):
141 def clean(repo, node=None, other=None, labels=None):
125 """Initialize a brand new merge state, removing any existing state on
142 """Initialize a brand new merge state, removing any existing state on
126 disk."""
143 disk."""
127 ms = mergestate(repo)
144 ms = mergestate(repo)
128 ms.reset(node, other, labels)
145 ms.reset(node, other, labels)
129 return ms
146 return ms
130
147
131 @staticmethod
148 @staticmethod
132 def read(repo):
149 def read(repo):
133 """Initialize the merge state, reading it from disk."""
150 """Initialize the merge state, reading it from disk."""
134 ms = mergestate(repo)
151 ms = mergestate(repo)
135 ms._read()
152 ms._read()
136 return ms
153 return ms
137
154
138 def __init__(self, repo):
155 def __init__(self, repo):
139 """Initialize the merge state.
156 """Initialize the merge state.
140
157
141 Do not use this directly! Instead call read() or clean()."""
158 Do not use this directly! Instead call read() or clean()."""
142 self._repo = repo
159 self._repo = repo
143 self._dirty = False
160 self._dirty = False
144 self._labels = None
161 self._labels = None
145
162
146 def reset(self, node=None, other=None, labels=None):
163 def reset(self, node=None, other=None, labels=None):
147 self._state = {}
164 self._state = {}
148 self._stateextras = {}
165 self._stateextras = {}
149 self._local = None
166 self._local = None
150 self._other = None
167 self._other = None
151 self._labels = labels
168 self._labels = labels
152 for var in ('localctx', 'otherctx'):
169 for var in ('localctx', 'otherctx'):
153 if var in vars(self):
170 if var in vars(self):
154 delattr(self, var)
171 delattr(self, var)
155 if node:
172 if node:
156 self._local = node
173 self._local = node
157 self._other = other
174 self._other = other
158 self._readmergedriver = None
175 self._readmergedriver = None
159 if self.mergedriver:
176 if self.mergedriver:
160 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
177 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
161 else:
178 else:
162 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
179 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
163 shutil.rmtree(self._repo.vfs.join('merge'), True)
180 shutil.rmtree(self._repo.vfs.join('merge'), True)
164 self._results = {}
181 self._results = {}
165 self._dirty = False
182 self._dirty = False
166
183
167 def _read(self):
184 def _read(self):
168 """Analyse each record content to restore a serialized state from disk
185 """Analyse each record content to restore a serialized state from disk
169
186
170 This function processes "record" entries produced by the de-serialization
187 This function processes "record" entries produced by the de-serialization
171 of the on-disk file.
188 of the on-disk file.
172 """
189 """
173 self._state = {}
190 self._state = {}
174 self._stateextras = {}
191 self._stateextras = {}
175 self._local = None
192 self._local = None
176 self._other = None
193 self._other = None
177 for var in ('localctx', 'otherctx'):
194 for var in ('localctx', 'otherctx'):
178 if var in vars(self):
195 if var in vars(self):
179 delattr(self, var)
196 delattr(self, var)
180 self._readmergedriver = None
197 self._readmergedriver = None
181 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
198 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
182 unsupported = set()
199 unsupported = set()
183 records = self._readrecords()
200 records = self._readrecords()
184 for rtype, record in records:
201 for rtype, record in records:
185 if rtype == RECORD_LOCAL:
202 if rtype == RECORD_LOCAL:
186 self._local = bin(record)
203 self._local = bin(record)
187 elif rtype == RECORD_OTHER:
204 elif rtype == RECORD_OTHER:
188 self._other = bin(record)
205 self._other = bin(record)
189 elif rtype == RECORD_MERGE_DRIVER_STATE:
206 elif rtype == RECORD_MERGE_DRIVER_STATE:
190 bits = record.split('\0', 1)
207 bits = record.split('\0', 1)
191 mdstate = bits[1]
208 mdstate = bits[1]
192 if len(mdstate) != 1 or mdstate not in (
209 if len(mdstate) != 1 or mdstate not in (
193 MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
210 MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
194 MERGE_DRIVER_STATE_SUCCESS):
211 MERGE_DRIVER_STATE_SUCCESS):
195 # the merge driver should be idempotent, so just rerun it
212 # the merge driver should be idempotent, so just rerun it
196 mdstate = MERGE_DRIVER_STATE_UNMARKED
213 mdstate = MERGE_DRIVER_STATE_UNMARKED
197
214
198 self._readmergedriver = bits[0]
215 self._readmergedriver = bits[0]
199 self._mdstate = mdstate
216 self._mdstate = mdstate
200 elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
217 elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
201 RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
218 RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
202 bits = record.split('\0')
219 bits = record.split('\0')
203 self._state[bits[0]] = bits[1:]
220 self._state[bits[0]] = bits[1:]
204 elif rtype == RECORD_FILE_VALUES:
221 elif rtype == RECORD_FILE_VALUES:
205 filename, rawextras = record.split('\0', 1)
222 filename, rawextras = record.split('\0', 1)
206 extraparts = rawextras.split('\0')
223 extraparts = rawextras.split('\0')
207 extras = {}
224 extras = {}
208 i = 0
225 i = 0
209 while i < len(extraparts):
226 while i < len(extraparts):
210 extras[extraparts[i]] = extraparts[i + 1]
227 extras[extraparts[i]] = extraparts[i + 1]
211 i += 2
228 i += 2
212
229
213 self._stateextras[filename] = extras
230 self._stateextras[filename] = extras
214 elif rtype == RECORD_LABELS:
231 elif rtype == RECORD_LABELS:
215 labels = record.split('\0', 2)
232 labels = record.split('\0', 2)
216 self._labels = [l for l in labels if len(l) > 0]
233 self._labels = [l for l in labels if len(l) > 0]
217 elif not rtype.islower():
234 elif not rtype.islower():
218 unsupported.add(rtype)
235 unsupported.add(rtype)
219 self._results = {}
236 self._results = {}
220 self._dirty = False
237 self._dirty = False
221
238
222 if unsupported:
239 if unsupported:
223 raise error.UnsupportedMergeRecords(unsupported)
240 raise error.UnsupportedMergeRecords(unsupported)
224
241
225 def _readrecords(self):
242 def _readrecords(self):
226 """Read merge state from disk and return a list of record (TYPE, data)
243 """Read merge state from disk and return a list of record (TYPE, data)
227
244
228 We read data from both v1 and v2 files and decide which one to use.
245 We read data from both v1 and v2 files and decide which one to use.
229
246
230 V1 was used by versions prior to 2.9.1 and contains less data than
247 V1 was used by versions prior to 2.9.1 and contains less data than
231 v2. We read both versions and check that no data in v2 contradicts
248 v2. We read both versions and check that no data in v2 contradicts
232 v1. If there is no contradiction we can safely assume that both v1
249 v1. If there is no contradiction we can safely assume that both v1
233 and v2 were written at the same time and use the extra data in v2. If
250 and v2 were written at the same time and use the extra data in v2. If
234 there is a contradiction we ignore the v2 content as we assume an old version
251 there is a contradiction we ignore the v2 content as we assume an old version
235 of Mercurial has overwritten the mergestate file and left an old v2
252 of Mercurial has overwritten the mergestate file and left an old v2
236 file around.
253 file around.
237
254
238 returns list of record [(TYPE, data), ...]"""
255 returns list of record [(TYPE, data), ...]"""
239 v1records = self._readrecordsv1()
256 v1records = self._readrecordsv1()
240 v2records = self._readrecordsv2()
257 v2records = self._readrecordsv2()
241 if self._v1v2match(v1records, v2records):
258 if self._v1v2match(v1records, v2records):
242 return v2records
259 return v2records
243 else:
260 else:
244 # v1 file is newer than v2 file, use it
261 # v1 file is newer than v2 file, use it
245 # we have to infer the "other" changeset of the merge
262 # we have to infer the "other" changeset of the merge
246 # we cannot do better than that with v1 of the format
263 # we cannot do better than that with v1 of the format
247 mctx = self._repo[None].parents()[-1]
264 mctx = self._repo[None].parents()[-1]
248 v1records.append((RECORD_OTHER, mctx.hex()))
265 v1records.append((RECORD_OTHER, mctx.hex()))
249 # add place holder "other" file node information
266 # add place holder "other" file node information
250 # nobody is using it yet so we do not need to fetch the data
267 # nobody is using it yet so we do not need to fetch the data
251 # if mctx was wrong `mctx[bits[-2]]` may fail.
268 # if mctx was wrong `mctx[bits[-2]]` may fail.
252 for idx, r in enumerate(v1records):
269 for idx, r in enumerate(v1records):
253 if r[0] == RECORD_MERGED:
270 if r[0] == RECORD_MERGED:
254 bits = r[1].split('\0')
271 bits = r[1].split('\0')
255 bits.insert(-2, '')
272 bits.insert(-2, '')
256 v1records[idx] = (r[0], '\0'.join(bits))
273 v1records[idx] = (r[0], '\0'.join(bits))
257 return v1records
274 return v1records
258
275
259 def _v1v2match(self, v1records, v2records):
276 def _v1v2match(self, v1records, v2records):
260 oldv2 = set() # old format version of v2 record
277 oldv2 = set() # old format version of v2 record
261 for rec in v2records:
278 for rec in v2records:
262 if rec[0] == RECORD_LOCAL:
279 if rec[0] == RECORD_LOCAL:
263 oldv2.add(rec)
280 oldv2.add(rec)
264 elif rec[0] == RECORD_MERGED:
281 elif rec[0] == RECORD_MERGED:
265 # drop the onode data (not contained in v1)
282 # drop the onode data (not contained in v1)
266 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
283 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
267 for rec in v1records:
284 for rec in v1records:
268 if rec not in oldv2:
285 if rec not in oldv2:
269 return False
286 return False
270 else:
287 else:
271 return True
288 return True
272
289
273 def _readrecordsv1(self):
290 def _readrecordsv1(self):
274 """read on disk merge state for version 1 file
291 """read on disk merge state for version 1 file
275
292
276 returns list of record [(TYPE, data), ...]
293 returns list of record [(TYPE, data), ...]
277
294
278 Note: the "F" data from this file are one entry short
295 Note: the "F" data from this file are one entry short
279 (no "other file node" entry)
296 (no "other file node" entry)
280 """
297 """
281 records = []
298 records = []
282 try:
299 try:
283 f = self._repo.vfs(self.statepathv1)
300 f = self._repo.vfs(self.statepathv1)
284 for i, l in enumerate(f):
301 for i, l in enumerate(f):
285 if i == 0:
302 if i == 0:
286 records.append((RECORD_LOCAL, l[:-1]))
303 records.append((RECORD_LOCAL, l[:-1]))
287 else:
304 else:
288 records.append((RECORD_MERGED, l[:-1]))
305 records.append((RECORD_MERGED, l[:-1]))
289 f.close()
306 f.close()
290 except IOError as err:
307 except IOError as err:
291 if err.errno != errno.ENOENT:
308 if err.errno != errno.ENOENT:
292 raise
309 raise
293 return records
310 return records
294
311
295 def _readrecordsv2(self):
312 def _readrecordsv2(self):
296 """read on disk merge state for version 2 file
313 """read on disk merge state for version 2 file
297
314
298 This format is a list of arbitrary records of the form:
315 This format is a list of arbitrary records of the form:
299
316
300 [type][length][content]
317 [type][length][content]
301
318
302 `type` is a single character, `length` is a 4 byte integer, and
319 `type` is a single character, `length` is a 4 byte integer, and
303 `content` is an arbitrary byte sequence of length `length`.
320 `content` is an arbitrary byte sequence of length `length`.
304
321
305 Mercurial versions prior to 3.7 have a bug where if there are
322 Mercurial versions prior to 3.7 have a bug where if there are
306 unsupported mandatory merge records, attempting to clear out the merge
323 unsupported mandatory merge records, attempting to clear out the merge
307 state with hg update --clean or similar aborts. The 't' record type
324 state with hg update --clean or similar aborts. The 't' record type
308 works around that by writing out what those versions treat as an
325 works around that by writing out what those versions treat as an
309 advisory record, but later versions interpret as special: the first
326 advisory record, but later versions interpret as special: the first
310 character is the 'real' record type and everything onwards is the data.
327 character is the 'real' record type and everything onwards is the data.
311
328
312 Returns list of records [(TYPE, data), ...]."""
329 Returns list of records [(TYPE, data), ...]."""
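# worked example (comment added here, not in the original source): a
# RECORD_LOCAL entry holding a 40-character hex node is laid out as
#   b'L' + b'\x00\x00\x00\x28' + node_hex
# which is what _pack('>sI40s', b'L', 40, node_hex) produces on the write side.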
313 records = []
330 records = []
314 try:
331 try:
315 f = self._repo.vfs(self.statepathv2)
332 f = self._repo.vfs(self.statepathv2)
316 data = f.read()
333 data = f.read()
317 off = 0
334 off = 0
318 end = len(data)
335 end = len(data)
319 while off < end:
336 while off < end:
320 rtype = data[off:off + 1]
337 rtype = data[off:off + 1]
321 off += 1
338 off += 1
322 length = _unpack('>I', data[off:(off + 4)])[0]
339 length = _unpack('>I', data[off:(off + 4)])[0]
323 off += 4
340 off += 4
324 record = data[off:(off + length)]
341 record = data[off:(off + length)]
325 off += length
342 off += length
326 if rtype == RECORD_OVERRIDE:
343 if rtype == RECORD_OVERRIDE:
327 rtype, record = record[0:1], record[1:]
344 rtype, record = record[0:1], record[1:]
328 records.append((rtype, record))
345 records.append((rtype, record))
329 f.close()
346 f.close()
330 except IOError as err:
347 except IOError as err:
331 if err.errno != errno.ENOENT:
348 if err.errno != errno.ENOENT:
332 raise
349 raise
333 return records
350 return records
334
351
335 @util.propertycache
352 @util.propertycache
336 def mergedriver(self):
353 def mergedriver(self):
337 # protect against the following:
354 # protect against the following:
338 # - A configures a malicious merge driver in their hgrc, then
355 # - A configures a malicious merge driver in their hgrc, then
339 # pauses the merge
356 # pauses the merge
340 # - A edits their hgrc to remove references to the merge driver
357 # - A edits their hgrc to remove references to the merge driver
341 # - A gives a copy of their entire repo, including .hg, to B
358 # - A gives a copy of their entire repo, including .hg, to B
342 # - B inspects .hgrc and finds it to be clean
359 # - B inspects .hgrc and finds it to be clean
343 # - B then continues the merge and the malicious merge driver
360 # - B then continues the merge and the malicious merge driver
344 # gets invoked
361 # gets invoked
345 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
362 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
346 if (self._readmergedriver is not None
363 if (self._readmergedriver is not None
347 and self._readmergedriver != configmergedriver):
364 and self._readmergedriver != configmergedriver):
348 raise error.ConfigError(
365 raise error.ConfigError(
349 _("merge driver changed since merge started"),
366 _("merge driver changed since merge started"),
350 hint=_("revert merge driver change or abort merge"))
367 hint=_("revert merge driver change or abort merge"))
351
368
352 return configmergedriver
369 return configmergedriver
353
370
354 @util.propertycache
371 @util.propertycache
355 def localctx(self):
372 def localctx(self):
356 if self._local is None:
373 if self._local is None:
357 msg = "localctx accessed but self._local isn't set"
374 msg = "localctx accessed but self._local isn't set"
358 raise error.ProgrammingError(msg)
375 raise error.ProgrammingError(msg)
359 return self._repo[self._local]
376 return self._repo[self._local]
360
377
361 @util.propertycache
378 @util.propertycache
362 def otherctx(self):
379 def otherctx(self):
363 if self._other is None:
380 if self._other is None:
364 msg = "otherctx accessed but self._other isn't set"
381 msg = "otherctx accessed but self._other isn't set"
365 raise error.ProgrammingError(msg)
382 raise error.ProgrammingError(msg)
366 return self._repo[self._other]
383 return self._repo[self._other]
367
384
368 def active(self):
385 def active(self):
369 """Whether mergestate is active.
386 """Whether mergestate is active.
370
387
371 Returns True if there appears to be mergestate. This is a rough proxy
388 Returns True if there appears to be mergestate. This is a rough proxy
372 for "is a merge in progress."
389 for "is a merge in progress."
373 """
390 """
374 # Check local variables before looking at filesystem for performance
391 # Check local variables before looking at filesystem for performance
375 # reasons.
392 # reasons.
376 return bool(self._local) or bool(self._state) or \
393 return bool(self._local) or bool(self._state) or \
377 self._repo.vfs.exists(self.statepathv1) or \
394 self._repo.vfs.exists(self.statepathv1) or \
378 self._repo.vfs.exists(self.statepathv2)
395 self._repo.vfs.exists(self.statepathv2)
379
396
380 def commit(self):
397 def commit(self):
381 """Write current state on disk (if necessary)"""
398 """Write current state on disk (if necessary)"""
382 if self._dirty:
399 if self._dirty:
383 records = self._makerecords()
400 records = self._makerecords()
384 self._writerecords(records)
401 self._writerecords(records)
385 self._dirty = False
402 self._dirty = False
386
403
387 def _makerecords(self):
404 def _makerecords(self):
388 records = []
405 records = []
389 records.append((RECORD_LOCAL, hex(self._local)))
406 records.append((RECORD_LOCAL, hex(self._local)))
390 records.append((RECORD_OTHER, hex(self._other)))
407 records.append((RECORD_OTHER, hex(self._other)))
391 if self.mergedriver:
408 if self.mergedriver:
392 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
409 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
393 self.mergedriver, self._mdstate])))
410 self.mergedriver, self._mdstate])))
394 # Write out state items. In all cases, the value of the state map entry
411 # Write out state items. In all cases, the value of the state map entry
395 # is written as the contents of the record. The record type depends on
412 # is written as the contents of the record. The record type depends on
396 # the type of state that is stored, and capital-letter records are used
413 # the type of state that is stored, and capital-letter records are used
397 # to prevent older versions of Mercurial that do not support the feature
414 # to prevent older versions of Mercurial that do not support the feature
398 # from loading them.
415 # from loading them.
399 for filename, v in self._state.iteritems():
416 for filename, v in self._state.iteritems():
400 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
417 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
401 # Driver-resolved merge. These are stored in 'D' records.
418 # Driver-resolved merge. These are stored in 'D' records.
402 records.append((RECORD_MERGE_DRIVER_MERGE,
419 records.append((RECORD_MERGE_DRIVER_MERGE,
403 '\0'.join([filename] + v)))
420 '\0'.join([filename] + v)))
404 elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
421 elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
405 MERGE_RECORD_RESOLVED_PATH):
422 MERGE_RECORD_RESOLVED_PATH):
406 # Path conflicts. These are stored in 'P' records. The current
423 # Path conflicts. These are stored in 'P' records. The current
407 # resolution state ('pu' or 'pr') is stored within the record.
424 # resolution state ('pu' or 'pr') is stored within the record.
408 records.append((RECORD_PATH_CONFLICT,
425 records.append((RECORD_PATH_CONFLICT,
409 '\0'.join([filename] + v)))
426 '\0'.join([filename] + v)))
410 elif v[1] == nullhex or v[6] == nullhex:
427 elif v[1] == nullhex or v[6] == nullhex:
411 # Change/Delete or Delete/Change conflicts. These are stored in
428 # Change/Delete or Delete/Change conflicts. These are stored in
412 # 'C' records. v[1] is the local file, and is nullhex when the
429 # 'C' records. v[1] is the local file, and is nullhex when the
413 # file is deleted locally ('dc'). v[6] is the remote file, and
430 # file is deleted locally ('dc'). v[6] is the remote file, and
414 # is nullhex when the file is deleted remotely ('cd').
431 # is nullhex when the file is deleted remotely ('cd').
415 records.append((RECORD_CHANGEDELETE_CONFLICT,
432 records.append((RECORD_CHANGEDELETE_CONFLICT,
416 '\0'.join([filename] + v)))
433 '\0'.join([filename] + v)))
417 else:
434 else:
418 # Normal files. These are stored in 'F' records.
435 # Normal files. These are stored in 'F' records.
419 records.append((RECORD_MERGED,
436 records.append((RECORD_MERGED,
420 '\0'.join([filename] + v)))
437 '\0'.join([filename] + v)))
421 for filename, extras in sorted(self._stateextras.iteritems()):
438 for filename, extras in sorted(self._stateextras.iteritems()):
422 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
439 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
423 extras.iteritems())
440 extras.iteritems())
424 records.append((RECORD_FILE_VALUES,
441 records.append((RECORD_FILE_VALUES,
425 '%s\0%s' % (filename, rawextras)))
442 '%s\0%s' % (filename, rawextras)))
426 if self._labels is not None:
443 if self._labels is not None:
427 labels = '\0'.join(self._labels)
444 labels = '\0'.join(self._labels)
428 records.append((RECORD_LABELS, labels))
445 records.append((RECORD_LABELS, labels))
429 return records
446 return records
430
447
431 def _writerecords(self, records):
448 def _writerecords(self, records):
432 """Write current state on disk (both v1 and v2)"""
449 """Write current state on disk (both v1 and v2)"""
433 self._writerecordsv1(records)
450 self._writerecordsv1(records)
434 self._writerecordsv2(records)
451 self._writerecordsv2(records)
435
452
436 def _writerecordsv1(self, records):
453 def _writerecordsv1(self, records):
437 """Write current state on disk in a version 1 file"""
454 """Write current state on disk in a version 1 file"""
438 f = self._repo.vfs(self.statepathv1, 'wb')
455 f = self._repo.vfs(self.statepathv1, 'wb')
439 irecords = iter(records)
456 irecords = iter(records)
440 lrecords = next(irecords)
457 lrecords = next(irecords)
441 assert lrecords[0] == RECORD_LOCAL
458 assert lrecords[0] == RECORD_LOCAL
442 f.write(hex(self._local) + '\n')
459 f.write(hex(self._local) + '\n')
443 for rtype, data in irecords:
460 for rtype, data in irecords:
444 if rtype == RECORD_MERGED:
461 if rtype == RECORD_MERGED:
445 f.write('%s\n' % _droponode(data))
462 f.write('%s\n' % _droponode(data))
446 f.close()
463 f.close()
447
464
448 def _writerecordsv2(self, records):
465 def _writerecordsv2(self, records):
449 """Write current state on disk in a version 2 file
466 """Write current state on disk in a version 2 file
450
467
451 See the docstring for _readrecordsv2 for why we use 't'."""
468 See the docstring for _readrecordsv2 for why we use 't'."""
452 # these are the records that all version 2 clients can read
469 # these are the records that all version 2 clients can read
453 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
470 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
454 f = self._repo.vfs(self.statepathv2, 'wb')
471 f = self._repo.vfs(self.statepathv2, 'wb')
455 for key, data in records:
472 for key, data in records:
456 assert len(key) == 1
473 assert len(key) == 1
457 if key not in allowlist:
474 if key not in allowlist:
458 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
475 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
459 format = '>sI%is' % len(data)
476 format = '>sI%is' % len(data)
460 f.write(_pack(format, key, len(data), data))
477 f.write(_pack(format, key, len(data), data))
461 f.close()
478 f.close()
462
479
463 def add(self, fcl, fco, fca, fd):
480 def add(self, fcl, fco, fca, fd):
464 """add a new (potentially?) conflicting file the merge state
481 """add a new (potentially?) conflicting file the merge state
465 fcl: file context for local,
482 fcl: file context for local,
466 fco: file context for remote,
483 fco: file context for remote,
467 fca: file context for ancestors,
484 fca: file context for ancestors,
468 fd: file path of the resulting merge.
485 fd: file path of the resulting merge.
469
486
470 note: also write the local version to the `.hg/merge` directory.
487 note: also write the local version to the `.hg/merge` directory.
471 """
488 """
472 if fcl.isabsent():
489 if fcl.isabsent():
473 hash = nullhex
490 hash = nullhex
474 else:
491 else:
475 hash = hex(hashlib.sha1(fcl.path()).digest())
492 hash = hex(hashlib.sha1(fcl.path()).digest())
476 self._repo.vfs.write('merge/' + hash, fcl.data())
493 self._repo.vfs.write('merge/' + hash, fcl.data())
477 self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
494 self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
478 fca.path(), hex(fca.filenode()),
495 fca.path(), hex(fca.filenode()),
479 fco.path(), hex(fco.filenode()),
496 fco.path(), hex(fco.filenode()),
480 fcl.flags()]
497 fcl.flags()]
481 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
498 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
482 self._dirty = True
499 self._dirty = True
483
500
484 def addpath(self, path, frename, forigin):
501 def addpath(self, path, frename, forigin):
485 """add a new conflicting path to the merge state
502 """add a new conflicting path to the merge state
486 path: the path that conflicts
503 path: the path that conflicts
487 frename: the filename the conflicting file was renamed to
504 frename: the filename the conflicting file was renamed to
488 forigin: origin of the file ('l' or 'r' for local/remote)
505 forigin: origin of the file ('l' or 'r' for local/remote)
489 """
506 """
490 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
507 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
491 self._dirty = True
508 self._dirty = True
492
509
493 def __contains__(self, dfile):
510 def __contains__(self, dfile):
494 return dfile in self._state
511 return dfile in self._state
495
512
496 def __getitem__(self, dfile):
513 def __getitem__(self, dfile):
497 return self._state[dfile][0]
514 return self._state[dfile][0]
498
515
499 def __iter__(self):
516 def __iter__(self):
500 return iter(sorted(self._state))
517 return iter(sorted(self._state))
501
518
502 def files(self):
519 def files(self):
503 return self._state.keys()
520 return self._state.keys()
504
521
505 def mark(self, dfile, state):
522 def mark(self, dfile, state):
506 self._state[dfile][0] = state
523 self._state[dfile][0] = state
507 self._dirty = True
524 self._dirty = True
508
525
509 def mdstate(self):
526 def mdstate(self):
510 return self._mdstate
527 return self._mdstate
511
528
512 def unresolved(self):
529 def unresolved(self):
513 """Obtain the paths of unresolved files."""
530 """Obtain the paths of unresolved files."""
514
531
515 for f, entry in self._state.iteritems():
532 for f, entry in self._state.iteritems():
516 if entry[0] in (MERGE_RECORD_UNRESOLVED,
533 if entry[0] in (MERGE_RECORD_UNRESOLVED,
517 MERGE_RECORD_UNRESOLVED_PATH):
534 MERGE_RECORD_UNRESOLVED_PATH):
518 yield f
535 yield f
519
536
520 def driverresolved(self):
537 def driverresolved(self):
521 """Obtain the paths of driver-resolved files."""
538 """Obtain the paths of driver-resolved files."""
522
539
523 for f, entry in self._state.items():
540 for f, entry in self._state.items():
524 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
541 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
525 yield f
542 yield f
526
543
527 def extras(self, filename):
544 def extras(self, filename):
528 return self._stateextras.setdefault(filename, {})
545 return self._stateextras.setdefault(filename, {})
529
546
530 def _resolve(self, preresolve, dfile, wctx):
547 def _resolve(self, preresolve, dfile, wctx):
531 """rerun merge process for file path `dfile`"""
548 """rerun merge process for file path `dfile`"""
532 if self[dfile] in (MERGE_RECORD_RESOLVED,
549 if self[dfile] in (MERGE_RECORD_RESOLVED,
533 MERGE_RECORD_DRIVER_RESOLVED):
550 MERGE_RECORD_DRIVER_RESOLVED):
534 return True, 0
551 return True, 0
535 stateentry = self._state[dfile]
552 stateentry = self._state[dfile]
536 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
553 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
537 octx = self._repo[self._other]
554 octx = self._repo[self._other]
538 extras = self.extras(dfile)
555 extras = self.extras(dfile)
539 anccommitnode = extras.get('ancestorlinknode')
556 anccommitnode = extras.get('ancestorlinknode')
540 if anccommitnode:
557 if anccommitnode:
541 actx = self._repo[anccommitnode]
558 actx = self._repo[anccommitnode]
542 else:
559 else:
543 actx = None
560 actx = None
544 fcd = self._filectxorabsent(hash, wctx, dfile)
561 fcd = self._filectxorabsent(hash, wctx, dfile)
545 fco = self._filectxorabsent(onode, octx, ofile)
562 fco = self._filectxorabsent(onode, octx, ofile)
546 # TODO: move this to filectxorabsent
563 # TODO: move this to filectxorabsent
547 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
564 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
548 # "premerge" x flags
565 # "premerge" x flags
549 flo = fco.flags()
566 flo = fco.flags()
550 fla = fca.flags()
567 fla = fca.flags()
551 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
568 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
552 if fca.node() == nullid and flags != flo:
569 if fca.node() == nullid and flags != flo:
553 if preresolve:
570 if preresolve:
554 self._repo.ui.warn(
571 self._repo.ui.warn(
555 _('warning: cannot merge flags for %s '
572 _('warning: cannot merge flags for %s '
556 'without common ancestor - keeping local flags\n')
573 'without common ancestor - keeping local flags\n')
557 % afile)
574 % afile)
558 elif flags == fla:
575 elif flags == fla:
559 flags = flo
576 flags = flo
560 if preresolve:
577 if preresolve:
561 # restore local
578 # restore local
562 if hash != nullhex:
579 if hash != nullhex:
563 f = self._repo.vfs('merge/' + hash)
580 f = self._repo.vfs('merge/' + hash)
564 wctx[dfile].write(f.read(), flags)
581 wctx[dfile].write(f.read(), flags)
565 f.close()
582 f.close()
566 else:
583 else:
567 wctx[dfile].remove(ignoremissing=True)
584 wctx[dfile].remove(ignoremissing=True)
568 complete, r, deleted = filemerge.premerge(self._repo, wctx,
585 complete, r, deleted = filemerge.premerge(self._repo, wctx,
569 self._local, lfile, fcd,
586 self._local, lfile, fcd,
570 fco, fca,
587 fco, fca,
571 labels=self._labels)
588 labels=self._labels)
572 else:
589 else:
573 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
590 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
574 self._local, lfile, fcd,
591 self._local, lfile, fcd,
575 fco, fca,
592 fco, fca,
576 labels=self._labels)
593 labels=self._labels)
577 if r is None:
594 if r is None:
578 # no real conflict
595 # no real conflict
579 del self._state[dfile]
596 del self._state[dfile]
580 self._stateextras.pop(dfile, None)
597 self._stateextras.pop(dfile, None)
581 self._dirty = True
598 self._dirty = True
582 elif not r:
599 elif not r:
583 self.mark(dfile, MERGE_RECORD_RESOLVED)
600 self.mark(dfile, MERGE_RECORD_RESOLVED)
584
601
585 if complete:
602 if complete:
586 action = None
603 action = None
587 if deleted:
604 if deleted:
588 if fcd.isabsent():
605 if fcd.isabsent():
589 # dc: local picked. Need to drop if present, which may
606 # dc: local picked. Need to drop if present, which may
590 # happen on re-resolves.
607 # happen on re-resolves.
591 action = 'f'
608 action = ACTION_FORGET
592 else:
609 else:
593 # cd: remote picked (or otherwise deleted)
610 # cd: remote picked (or otherwise deleted)
594 action = 'r'
611 action = ACTION_REMOVE
595 else:
612 else:
596 if fcd.isabsent(): # dc: remote picked
613 if fcd.isabsent(): # dc: remote picked
597 action = 'g'
614 action = ACTION_GET
598 elif fco.isabsent(): # cd: local picked
615 elif fco.isabsent(): # cd: local picked
599 if dfile in self.localctx:
616 if dfile in self.localctx:
600 action = 'am'
617 action = ACTION_ADD_MODIFIED
601 else:
618 else:
602 action = 'a'
619 action = ACTION_ADD
603 # else: regular merges (no action necessary)
620 # else: regular merges (no action necessary)
604 self._results[dfile] = r, action
621 self._results[dfile] = r, action
605
622
606 return complete, r
623 return complete, r
607
624
608 def _filectxorabsent(self, hexnode, ctx, f):
625 def _filectxorabsent(self, hexnode, ctx, f):
609 if hexnode == nullhex:
626 if hexnode == nullhex:
610 return filemerge.absentfilectx(ctx, f)
627 return filemerge.absentfilectx(ctx, f)
611 else:
628 else:
612 return ctx[f]
629 return ctx[f]
613
630
614 def preresolve(self, dfile, wctx):
631 def preresolve(self, dfile, wctx):
615 """run premerge process for dfile
632 """run premerge process for dfile
616
633
617 Returns whether the merge is complete, and the exit code."""
634 Returns whether the merge is complete, and the exit code."""
618 return self._resolve(True, dfile, wctx)
635 return self._resolve(True, dfile, wctx)
619
636
620 def resolve(self, dfile, wctx):
637 def resolve(self, dfile, wctx):
621 """run merge process (assuming premerge was run) for dfile
638 """run merge process (assuming premerge was run) for dfile
622
639
623 Returns the exit code of the merge."""
640 Returns the exit code of the merge."""
624 return self._resolve(False, dfile, wctx)[1]
641 return self._resolve(False, dfile, wctx)[1]
625
642
626 def counts(self):
643 def counts(self):
627 """return counts for updated, merged and removed files in this
644 """return counts for updated, merged and removed files in this
628 session"""
645 session"""
629 updated, merged, removed = 0, 0, 0
646 updated, merged, removed = 0, 0, 0
630 for r, action in self._results.itervalues():
647 for r, action in self._results.itervalues():
631 if r is None:
648 if r is None:
632 updated += 1
649 updated += 1
633 elif r == 0:
650 elif r == 0:
634 if action == 'r':
651 if action == ACTION_REMOVE:
635 removed += 1
652 removed += 1
636 else:
653 else:
637 merged += 1
654 merged += 1
638 return updated, merged, removed
655 return updated, merged, removed
639
656
640 def unresolvedcount(self):
657 def unresolvedcount(self):
641 """get unresolved count for this merge (persistent)"""
658 """get unresolved count for this merge (persistent)"""
642 return len(list(self.unresolved()))
659 return len(list(self.unresolved()))
643
660
644 def actions(self):
661 def actions(self):
645 """return lists of actions to perform on the dirstate"""
662 """return lists of actions to perform on the dirstate"""
646 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
663 actions = {
664 ACTION_REMOVE: [],
665 ACTION_FORGET: [],
666 ACTION_ADD: [],
667 ACTION_ADD_MODIFIED: [],
668 ACTION_GET: [],
669 }
647 for f, (r, action) in self._results.iteritems():
670 for f, (r, action) in self._results.iteritems():
648 if action is not None:
671 if action is not None:
649 actions[action].append((f, None, "merge result"))
672 actions[action].append((f, None, "merge result"))
650 return actions
673 return actions
651
674
652 def recordactions(self):
675 def recordactions(self):
653 """record remove/add/get actions in the dirstate"""
676 """record remove/add/get actions in the dirstate"""
654 branchmerge = self._repo.dirstate.p2() != nullid
677 branchmerge = self._repo.dirstate.p2() != nullid
655 recordupdates(self._repo, self.actions(), branchmerge)
678 recordupdates(self._repo, self.actions(), branchmerge)
656
679
657 def queueremove(self, f):
680 def queueremove(self, f):
658 """queues a file to be removed from the dirstate
681 """queues a file to be removed from the dirstate
659
682
660 Meant for use by custom merge drivers."""
683 Meant for use by custom merge drivers."""
661 self._results[f] = 0, 'r'
684 self._results[f] = 0, ACTION_REMOVE
662
685
663 def queueadd(self, f):
686 def queueadd(self, f):
664 """queues a file to be added to the dirstate
687 """queues a file to be added to the dirstate
665
688
666 Meant for use by custom merge drivers."""
689 Meant for use by custom merge drivers."""
667 self._results[f] = 0, 'a'
690 self._results[f] = 0, ACTION_ADD
668
691
669 def queueget(self, f):
692 def queueget(self, f):
670 """queues a file to be marked modified in the dirstate
693 """queues a file to be marked modified in the dirstate
671
694
672 Meant for use by custom merge drivers."""
695 Meant for use by custom merge drivers."""
673 self._results[f] = 0, 'g'
696 self._results[f] = 0, ACTION_GET
674
697
675 def _getcheckunknownconfig(repo, section, name):
698 def _getcheckunknownconfig(repo, section, name):
676 config = repo.ui.config(section, name)
699 config = repo.ui.config(section, name)
677 valid = ['abort', 'ignore', 'warn']
700 valid = ['abort', 'ignore', 'warn']
678 if config not in valid:
701 if config not in valid:
679 validstr = ', '.join(["'" + v + "'" for v in valid])
702 validstr = ', '.join(["'" + v + "'" for v in valid])
680 raise error.ConfigError(_("%s.%s not valid "
703 raise error.ConfigError(_("%s.%s not valid "
681 "('%s' is none of %s)")
704 "('%s' is none of %s)")
682 % (section, name, config, validstr))
705 % (section, name, config, validstr))
683 return config
706 return config
684
707
685 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
708 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
686 if wctx.isinmemory():
709 if wctx.isinmemory():
687 # Nothing to do in IMM because nothing in the "working copy" can be an
710 # Nothing to do in IMM because nothing in the "working copy" can be an
688 # unknown file.
711 # unknown file.
689 #
712 #
690 # Note that we should bail out here, not in ``_checkunknownfiles()``,
713 # Note that we should bail out here, not in ``_checkunknownfiles()``,
691 # because that function does other useful work.
714 # because that function does other useful work.
692 return False
715 return False
693
716
694 if f2 is None:
717 if f2 is None:
695 f2 = f
718 f2 = f
696 return (repo.wvfs.audit.check(f)
719 return (repo.wvfs.audit.check(f)
697 and repo.wvfs.isfileorlink(f)
720 and repo.wvfs.isfileorlink(f)
698 and repo.dirstate.normalize(f) not in repo.dirstate
721 and repo.dirstate.normalize(f) not in repo.dirstate
699 and mctx[f2].cmp(wctx[f]))
722 and mctx[f2].cmp(wctx[f]))
700
723
701 class _unknowndirschecker(object):
724 class _unknowndirschecker(object):
702 """
725 """
703 Look for any unknown files or directories that may have a path conflict
726 Look for any unknown files or directories that may have a path conflict
704 with a file. If any path prefix of the file exists as a file or link,
727 with a file. If any path prefix of the file exists as a file or link,
705 then it conflicts. If the file itself is a directory that contains any
728 then it conflicts. If the file itself is a directory that contains any
706 file that is not tracked, then it conflicts.
729 file that is not tracked, then it conflicts.
707
730
708 Returns the shortest path at which a conflict occurs, or None if there is
731 Returns the shortest path at which a conflict occurs, or None if there is
709 no conflict.
732 no conflict.
710 """
733 """
711 def __init__(self):
734 def __init__(self):
712 # A set of paths known to be good. This prevents repeated checking of
735 # A set of paths known to be good. This prevents repeated checking of
713 # dirs. It will be updated with any new dirs that are checked and found
736 # dirs. It will be updated with any new dirs that are checked and found
714 # to be safe.
737 # to be safe.
715 self._unknowndircache = set()
738 self._unknowndircache = set()
716
739
717 # A set of paths that are known to be absent. This prevents repeated
740 # A set of paths that are known to be absent. This prevents repeated
718 # checking of subdirectories that are known not to exist. It will be
741 # checking of subdirectories that are known not to exist. It will be
719 # updated with any new dirs that are checked and found to be absent.
742 # updated with any new dirs that are checked and found to be absent.
720 self._missingdircache = set()
743 self._missingdircache = set()
721
744
722 def __call__(self, repo, wctx, f):
745 def __call__(self, repo, wctx, f):
723 if wctx.isinmemory():
746 if wctx.isinmemory():
724 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
747 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
725 return False
748 return False
726
749
727 # Check for path prefixes that exist as unknown files.
750 # Check for path prefixes that exist as unknown files.
728 for p in reversed(list(util.finddirs(f))):
751 for p in reversed(list(util.finddirs(f))):
729 if p in self._missingdircache:
752 if p in self._missingdircache:
730 return
753 return
731 if p in self._unknowndircache:
754 if p in self._unknowndircache:
732 continue
755 continue
733 if repo.wvfs.audit.check(p):
756 if repo.wvfs.audit.check(p):
734 if (repo.wvfs.isfileorlink(p)
757 if (repo.wvfs.isfileorlink(p)
735 and repo.dirstate.normalize(p) not in repo.dirstate):
758 and repo.dirstate.normalize(p) not in repo.dirstate):
736 return p
759 return p
737 if not repo.wvfs.lexists(p):
760 if not repo.wvfs.lexists(p):
738 self._missingdircache.add(p)
761 self._missingdircache.add(p)
739 return
762 return
740 self._unknowndircache.add(p)
763 self._unknowndircache.add(p)
741
764
742 # Check if the file conflicts with a directory containing unknown files.
765 # Check if the file conflicts with a directory containing unknown files.
743 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
766 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
744 # Does the directory contain any files that are not in the dirstate?
767 # Does the directory contain any files that are not in the dirstate?
745 for p, dirs, files in repo.wvfs.walk(f):
768 for p, dirs, files in repo.wvfs.walk(f):
746 for fn in files:
769 for fn in files:
747 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
770 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
748 relf = repo.dirstate.normalize(relf, isknown=True)
771 relf = repo.dirstate.normalize(relf, isknown=True)
749 if relf not in repo.dirstate:
772 if relf not in repo.dirstate:
750 return f
773 return f
751 return None
774 return None
752
775
753 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
776 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
754 """
777 """
755 Considers any actions that care about the presence of conflicting unknown
778 Considers any actions that care about the presence of conflicting unknown
756 files. For some actions, the result is to abort; for others, it is to
779 files. For some actions, the result is to abort; for others, it is to
757 choose a different action.
780 choose a different action.
758 """
781 """
759 fileconflicts = set()
782 fileconflicts = set()
760 pathconflicts = set()
783 pathconflicts = set()
761 warnconflicts = set()
784 warnconflicts = set()
762 abortconflicts = set()
785 abortconflicts = set()
763 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
786 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
764 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
787 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
765 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
788 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
766 if not force:
789 if not force:
767 def collectconflicts(conflicts, config):
790 def collectconflicts(conflicts, config):
768 if config == 'abort':
791 if config == 'abort':
769 abortconflicts.update(conflicts)
792 abortconflicts.update(conflicts)
770 elif config == 'warn':
793 elif config == 'warn':
771 warnconflicts.update(conflicts)
794 warnconflicts.update(conflicts)
772
795
773 checkunknowndirs = _unknowndirschecker()
796 checkunknowndirs = _unknowndirschecker()
774 for f, (m, args, msg) in actions.iteritems():
797 for f, (m, args, msg) in actions.iteritems():
775 if m in ('c', 'dc'):
798 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
776 if _checkunknownfile(repo, wctx, mctx, f):
799 if _checkunknownfile(repo, wctx, mctx, f):
777 fileconflicts.add(f)
800 fileconflicts.add(f)
778 elif pathconfig and f not in wctx:
801 elif pathconfig and f not in wctx:
779 path = checkunknowndirs(repo, wctx, f)
802 path = checkunknowndirs(repo, wctx, f)
780 if path is not None:
803 if path is not None:
781 pathconflicts.add(path)
804 pathconflicts.add(path)
782 elif m == 'dg':
805 elif m == ACTION_LOCAL_DIR_RENAME_GET:
783 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
806 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
784 fileconflicts.add(f)
807 fileconflicts.add(f)
785
808
786 allconflicts = fileconflicts | pathconflicts
809 allconflicts = fileconflicts | pathconflicts
787 ignoredconflicts = set([c for c in allconflicts
810 ignoredconflicts = set([c for c in allconflicts
788 if repo.dirstate._ignore(c)])
811 if repo.dirstate._ignore(c)])
789 unknownconflicts = allconflicts - ignoredconflicts
812 unknownconflicts = allconflicts - ignoredconflicts
790 collectconflicts(ignoredconflicts, ignoredconfig)
813 collectconflicts(ignoredconflicts, ignoredconfig)
791 collectconflicts(unknownconflicts, unknownconfig)
814 collectconflicts(unknownconflicts, unknownconfig)
792 else:
815 else:
793 for f, (m, args, msg) in actions.iteritems():
816 for f, (m, args, msg) in actions.iteritems():
794 if m == 'cm':
817 if m == ACTION_CREATED_MERGE:
795 fl2, anc = args
818 fl2, anc = args
796 different = _checkunknownfile(repo, wctx, mctx, f)
819 different = _checkunknownfile(repo, wctx, mctx, f)
797 if repo.dirstate._ignore(f):
820 if repo.dirstate._ignore(f):
798 config = ignoredconfig
821 config = ignoredconfig
799 else:
822 else:
800 config = unknownconfig
823 config = unknownconfig
801
824
802 # The behavior when force is True is described by this table:
825 # The behavior when force is True is described by this table:
803 # config different mergeforce | action backup
826 # config different mergeforce | action backup
804 # * n * | get n
827 # * n * | get n
805 # * y y | merge -
828 # * y y | merge -
806 # abort y n | merge - (1)
829 # abort y n | merge - (1)
807 # warn y n | warn + get y
830 # warn y n | warn + get y
808 # ignore y n | get y
831 # ignore y n | get y
809 #
832 #
810 # (1) this is probably the wrong behavior here -- we should
833 # (1) this is probably the wrong behavior here -- we should
811 # probably abort, but some actions like rebases currently
834 # probably abort, but some actions like rebases currently
812 # don't like an abort happening in the middle of
835 # don't like an abort happening in the middle of
813 # merge.update.
836 # merge.update.
814 if not different:
837 if not different:
815 actions[f] = ('g', (fl2, False), "remote created")
838 actions[f] = (ACTION_GET, (fl2, False), 'remote created')
816 elif mergeforce or config == 'abort':
839 elif mergeforce or config == 'abort':
817 actions[f] = ('m', (f, f, None, False, anc),
840 actions[f] = (ACTION_MERGE, (f, f, None, False, anc),
818 "remote differs from untracked local")
841 'remote differs from untracked local')
819 elif config == 'abort':
842 elif config == 'abort':
820 abortconflicts.add(f)
843 abortconflicts.add(f)
821 else:
844 else:
822 if config == 'warn':
845 if config == 'warn':
823 warnconflicts.add(f)
846 warnconflicts.add(f)
824 actions[f] = ('g', (fl2, True), "remote created")
847 actions[f] = (ACTION_GET, (fl2, True), 'remote created')
825
848
826 for f in sorted(abortconflicts):
849 for f in sorted(abortconflicts):
827 warn = repo.ui.warn
850 warn = repo.ui.warn
828 if f in pathconflicts:
851 if f in pathconflicts:
829 if repo.wvfs.isfileorlink(f):
852 if repo.wvfs.isfileorlink(f):
830 warn(_("%s: untracked file conflicts with directory\n") % f)
853 warn(_("%s: untracked file conflicts with directory\n") % f)
831 else:
854 else:
832 warn(_("%s: untracked directory conflicts with file\n") % f)
855 warn(_("%s: untracked directory conflicts with file\n") % f)
833 else:
856 else:
834 warn(_("%s: untracked file differs\n") % f)
857 warn(_("%s: untracked file differs\n") % f)
835 if abortconflicts:
858 if abortconflicts:
836 raise error.Abort(_("untracked files in working directory "
859 raise error.Abort(_("untracked files in working directory "
837 "differ from files in requested revision"))
860 "differ from files in requested revision"))
838
861
839 for f in sorted(warnconflicts):
862 for f in sorted(warnconflicts):
840 if repo.wvfs.isfileorlink(f):
863 if repo.wvfs.isfileorlink(f):
841 repo.ui.warn(_("%s: replacing untracked file\n") % f)
864 repo.ui.warn(_("%s: replacing untracked file\n") % f)
842 else:
865 else:
843 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
866 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
844
867
845 for f, (m, args, msg) in actions.iteritems():
868 for f, (m, args, msg) in actions.iteritems():
846 if m == 'c':
869 if m == ACTION_CREATED:
847 backup = (f in fileconflicts or f in pathconflicts or
870 backup = (f in fileconflicts or f in pathconflicts or
848 any(p in pathconflicts for p in util.finddirs(f)))
871 any(p in pathconflicts for p in util.finddirs(f)))
849 flags, = args
872 flags, = args
850 actions[f] = ('g', (flags, backup), msg)
873 actions[f] = (ACTION_GET, (flags, backup), msg)
851
874
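For orientation, each entry in the actions dict has the shape {filename: (action, args, message)}. The config/different/mergeforce table above reduces to a small decision function; the following is a minimal, self-contained sketch of that mapping (the names are stand-ins, not Mercurial's API):

# Illustrative sketch only: hypothetical stand-ins for the real constants.
ACTION_GET = 'g'
ACTION_MERGE = 'm'

def decide_unknown_file(config, different, mergeforce):
    """Return (action, backup, warn) for an untracked local file that the
    remote side wants to create, mirroring the behavior table above."""
    if not different:
        return ACTION_GET, False, False      # contents identical: just get it
    if mergeforce or config == 'abort':
        return ACTION_MERGE, False, False    # merge remote against untracked local
    if config == 'warn':
        return ACTION_GET, True, True        # warn, back up, then get
    return ACTION_GET, True, False           # 'ignore': back up silently and get

# e.g. decide_unknown_file('warn', different=True, mergeforce=False)
# -> ('g', True, True)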
852 def _forgetremoved(wctx, mctx, branchmerge):
875 def _forgetremoved(wctx, mctx, branchmerge):
853 """
876 """
854 Forget removed files
877 Forget removed files
855
878
856 If we're jumping between revisions (as opposed to merging), and if
879 If we're jumping between revisions (as opposed to merging), and if
857 neither the working directory nor the target rev has the file,
880 neither the working directory nor the target rev has the file,
858 then we need to remove it from the dirstate, to prevent the
881 then we need to remove it from the dirstate, to prevent the
859 dirstate from listing the file when it is no longer in the
882 dirstate from listing the file when it is no longer in the
860 manifest.
883 manifest.
861
884
862 If we're merging, and the other revision has removed a file
885 If we're merging, and the other revision has removed a file
863 that is not present in the working directory, we need to mark it
886 that is not present in the working directory, we need to mark it
864 as removed.
887 as removed.
865 """
888 """
866
889
867 actions = {}
890 actions = {}
868 m = 'f'
891 m = ACTION_FORGET
869 if branchmerge:
892 if branchmerge:
870 m = 'r'
893 m = ACTION_REMOVE
871 for f in wctx.deleted():
894 for f in wctx.deleted():
872 if f not in mctx:
895 if f not in mctx:
873 actions[f] = m, None, "forget deleted"
896 actions[f] = m, None, "forget deleted"
874
897
875 if not branchmerge:
898 if not branchmerge:
876 for f in wctx.removed():
899 for f in wctx.removed():
877 if f not in mctx:
900 if f not in mctx:
878 actions[f] = 'f', None, "forget removed"
901 actions[f] = ACTION_FORGET, None, "forget removed"
879
902
880 return actions
903 return actions
881
904
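A toy illustration of the forget-versus-remove split implemented by _forgetremoved, using plain sets instead of contexts (all names below are hypothetical stand-ins):

ACTION_FORGET = 'f'
ACTION_REMOVE = 'r'

def forget_removed(wctx_deleted, wctx_removed, target_manifest, branchmerge):
    """Files missing locally and in the target are forgotten on update,
    but marked removed on a branch merge."""
    actions = {}
    m = ACTION_REMOVE if branchmerge else ACTION_FORGET
    for f in wctx_deleted:
        if f not in target_manifest:
            actions[f] = (m, None, 'forget deleted')
    if not branchmerge:
        for f in wctx_removed:
            if f not in target_manifest:
                actions[f] = (ACTION_FORGET, None, 'forget removed')
    return actions

# forget_removed({'a'}, {'b'}, {'c'}, branchmerge=False)
# -> {'a': ('f', None, 'forget deleted'), 'b': ('f', None, 'forget removed')}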
882 def _checkcollision(repo, wmf, actions):
905 def _checkcollision(repo, wmf, actions):
883 # build provisional merged manifest up
906 # build provisional merged manifest up
884 pmmf = set(wmf)
907 pmmf = set(wmf)
885
908
886 if actions:
909 if actions:
887 # k, dr, e and rd are no-op
910 # KEEP and EXEC are no-op
888 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
911 for m in (ACTION_ADD, ACTION_ADD_MODIFIED, ACTION_FORGET, ACTION_GET,
912 ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED):
889 for f, args, msg in actions[m]:
913 for f, args, msg in actions[m]:
890 pmmf.add(f)
914 pmmf.add(f)
891 for f, args, msg in actions['r']:
915 for f, args, msg in actions[ACTION_REMOVE]:
892 pmmf.discard(f)
916 pmmf.discard(f)
893 for f, args, msg in actions['dm']:
917 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
894 f2, flags = args
918 f2, flags = args
895 pmmf.discard(f2)
919 pmmf.discard(f2)
896 pmmf.add(f)
920 pmmf.add(f)
897 for f, args, msg in actions['dg']:
921 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
898 pmmf.add(f)
922 pmmf.add(f)
899 for f, args, msg in actions['m']:
923 for f, args, msg in actions[ACTION_MERGE]:
900 f1, f2, fa, move, anc = args
924 f1, f2, fa, move, anc = args
901 if move:
925 if move:
902 pmmf.discard(f1)
926 pmmf.discard(f1)
903 pmmf.add(f)
927 pmmf.add(f)
904
928
905 # check case-folding collision in provisional merged manifest
929 # check case-folding collision in provisional merged manifest
906 foldmap = {}
930 foldmap = {}
907 for f in pmmf:
931 for f in pmmf:
908 fold = util.normcase(f)
932 fold = util.normcase(f)
909 if fold in foldmap:
933 if fold in foldmap:
910 raise error.Abort(_("case-folding collision between %s and %s")
934 raise error.Abort(_("case-folding collision between %s and %s")
911 % (f, foldmap[fold]))
935 % (f, foldmap[fold]))
912 foldmap[fold] = f
936 foldmap[fold] = f
913
937
914 # check case-folding of directories
938 # check case-folding of directories
915 foldprefix = unfoldprefix = lastfull = ''
939 foldprefix = unfoldprefix = lastfull = ''
916 for fold, f in sorted(foldmap.items()):
940 for fold, f in sorted(foldmap.items()):
917 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
941 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
918 # the folded prefix matches but actual casing is different
942 # the folded prefix matches but actual casing is different
919 raise error.Abort(_("case-folding collision between "
943 raise error.Abort(_("case-folding collision between "
920 "%s and directory of %s") % (lastfull, f))
944 "%s and directory of %s") % (lastfull, f))
921 foldprefix = fold + '/'
945 foldprefix = fold + '/'
922 unfoldprefix = f + '/'
946 unfoldprefix = f + '/'
923 lastfull = f
947 lastfull = f
924
948
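The file-level part of the case-folding check can be reproduced with a dict keyed on the folded name. A minimal sketch, using lower() as a stand-in for util.normcase:

def check_casefold_collisions(paths):
    """Raise if two paths differ only by case, as _checkcollision does
    for the provisional merged manifest."""
    foldmap = {}
    for f in paths:
        fold = f.lower()               # stand-in for util.normcase(f)
        if fold in foldmap:
            raise ValueError('case-folding collision between %s and %s'
                             % (f, foldmap[fold]))
        foldmap[fold] = f

# check_casefold_collisions(['README', 'readme'])  # raises ValueError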
925 def driverpreprocess(repo, ms, wctx, labels=None):
949 def driverpreprocess(repo, ms, wctx, labels=None):
926 """run the preprocess step of the merge driver, if any
950 """run the preprocess step of the merge driver, if any
927
951
928 This is currently not implemented -- it's an extension point."""
952 This is currently not implemented -- it's an extension point."""
929 return True
953 return True
930
954
931 def driverconclude(repo, ms, wctx, labels=None):
955 def driverconclude(repo, ms, wctx, labels=None):
932 """run the conclude step of the merge driver, if any
956 """run the conclude step of the merge driver, if any
933
957
934 This is currently not implemented -- it's an extension point."""
958 This is currently not implemented -- it's an extension point."""
935 return True
959 return True
936
960
937 def _filesindirs(repo, manifest, dirs):
961 def _filesindirs(repo, manifest, dirs):
938 """
962 """
939 Generator that yields pairs of all the files in the manifest that are found
963 Generator that yields pairs of all the files in the manifest that are found
940 inside the directories listed in dirs, and which directory they are found
964 inside the directories listed in dirs, and which directory they are found
941 in.
965 in.
942 """
966 """
943 for f in manifest:
967 for f in manifest:
944 for p in util.finddirs(f):
968 for p in util.finddirs(f):
945 if p in dirs:
969 if p in dirs:
946 yield f, p
970 yield f, p
947 break
971 break
948
972
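For reference, a self-contained approximation of the parent-directory walk that _filesindirs relies on (finddirs_sketch is only a guess at util.finddirs' behaviour: ancestor directories, deepest first):

def finddirs_sketch(path):
    """Yield the ancestor directories of path, deepest first."""
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def files_in_dirs(manifest, dirs):
    """Yield (file, dir) for manifest files that live under one of dirs."""
    for f in manifest:
        for p in finddirs_sketch(f):
            if p in dirs:
                yield f, p
                break

# list(files_in_dirs(['x/y/z', 'a/b'], {'x/y'})) -> [('x/y/z', 'x/y')]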
949 def checkpathconflicts(repo, wctx, mctx, actions):
973 def checkpathconflicts(repo, wctx, mctx, actions):
950 """
974 """
951 Check if any actions introduce path conflicts in the repository, updating
975 Check if any actions introduce path conflicts in the repository, updating
952 actions to record or handle the path conflict accordingly.
976 actions to record or handle the path conflict accordingly.
953 """
977 """
954 mf = wctx.manifest()
978 mf = wctx.manifest()
955
979
956 # The set of local files that conflict with a remote directory.
980 # The set of local files that conflict with a remote directory.
957 localconflicts = set()
981 localconflicts = set()
958
982
959 # The set of directories that conflict with a remote file, and so may cause
983 # The set of directories that conflict with a remote file, and so may cause
960 # conflicts if they still contain any files after the merge.
984 # conflicts if they still contain any files after the merge.
961 remoteconflicts = set()
985 remoteconflicts = set()
962
986
963 # The set of directories that appear as both a file and a directory in the
987 # The set of directories that appear as both a file and a directory in the
964 # remote manifest. These indicate an invalid remote manifest, which
988 # remote manifest. These indicate an invalid remote manifest, which
965 # can't be updated to cleanly.
989 # can't be updated to cleanly.
966 invalidconflicts = set()
990 invalidconflicts = set()
967
991
968 # The set of directories that contain files that are being created.
992 # The set of directories that contain files that are being created.
969 createdfiledirs = set()
993 createdfiledirs = set()
970
994
971 # The set of files deleted by all the actions.
995 # The set of files deleted by all the actions.
972 deletedfiles = set()
996 deletedfiles = set()
973
997
974 for f, (m, args, msg) in actions.items():
998 for f, (m, args, msg) in actions.items():
975 if m in ('c', 'dc', 'm', 'cm'):
999 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED, ACTION_MERGE,
1000 ACTION_CREATED_MERGE):
976 # This action may create a new local file.
1001 # This action may create a new local file.
977 createdfiledirs.update(util.finddirs(f))
1002 createdfiledirs.update(util.finddirs(f))
978 if mf.hasdir(f):
1003 if mf.hasdir(f):
979 # The file aliases a local directory. This might be ok if all
1004 # The file aliases a local directory. This might be ok if all
980 # the files in the local directory are being deleted. This
1005 # the files in the local directory are being deleted. This
981 # will be checked once we know what all the deleted files are.
1006 # will be checked once we know what all the deleted files are.
982 remoteconflicts.add(f)
1007 remoteconflicts.add(f)
983 # Track the names of all deleted files.
1008 # Track the names of all deleted files.
984 if m == 'r':
1009 if m == ACTION_REMOVE:
985 deletedfiles.add(f)
1010 deletedfiles.add(f)
986 if m == 'm':
1011 if m == ACTION_MERGE:
987 f1, f2, fa, move, anc = args
1012 f1, f2, fa, move, anc = args
988 if move:
1013 if move:
989 deletedfiles.add(f1)
1014 deletedfiles.add(f1)
990 if m == 'dm':
1015 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
991 f2, flags = args
1016 f2, flags = args
992 deletedfiles.add(f2)
1017 deletedfiles.add(f2)
993
1018
994 # Check all directories that contain created files for path conflicts.
1019 # Check all directories that contain created files for path conflicts.
995 for p in createdfiledirs:
1020 for p in createdfiledirs:
996 if p in mf:
1021 if p in mf:
997 if p in mctx:
1022 if p in mctx:
998 # A file is in a directory which aliases both a local
1023 # A file is in a directory which aliases both a local
999 # and a remote file. This is an internal inconsistency
1024 # and a remote file. This is an internal inconsistency
1000 # within the remote manifest.
1025 # within the remote manifest.
1001 invalidconflicts.add(p)
1026 invalidconflicts.add(p)
1002 else:
1027 else:
1003 # A file is in a directory which aliases a local file.
1028 # A file is in a directory which aliases a local file.
1004 # We will need to rename the local file.
1029 # We will need to rename the local file.
1005 localconflicts.add(p)
1030 localconflicts.add(p)
1006 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
1031 if p in actions and actions[p][0] in (ACTION_CREATED,
1032 ACTION_DELETED_CHANGED,
1033 ACTION_MERGE,
1034 ACTION_CREATED_MERGE):
1007 # The file is in a directory which aliases a remote file.
1035 # The file is in a directory which aliases a remote file.
1008 # This is an internal inconsistency within the remote
1036 # This is an internal inconsistency within the remote
1009 # manifest.
1037 # manifest.
1010 invalidconflicts.add(p)
1038 invalidconflicts.add(p)
1011
1039
1012 # Rename all local conflicting files that have not been deleted.
1040 # Rename all local conflicting files that have not been deleted.
1013 for p in localconflicts:
1041 for p in localconflicts:
1014 if p not in deletedfiles:
1042 if p not in deletedfiles:
1015 ctxname = bytes(wctx).rstrip('+')
1043 ctxname = bytes(wctx).rstrip('+')
1016 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1044 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1017 actions[pnew] = ('pr', (p,), "local path conflict")
1045 actions[pnew] = (ACTION_PATH_CONFLICT_RESOLVE, (p,),
1046 'local path conflict')
1018 actions[p] = ('p', (pnew, 'l'), "path conflict")
1047 actions[p] = (ACTION_PATH_CONFLICT, (pnew, 'l'),
1048 'path conflict')
1019
1049
1020 if remoteconflicts:
1050 if remoteconflicts:
1021 # Check if all files in the conflicting directories have been removed.
1051 # Check if all files in the conflicting directories have been removed.
1022 ctxname = bytes(mctx).rstrip('+')
1052 ctxname = bytes(mctx).rstrip('+')
1023 for f, p in _filesindirs(repo, mf, remoteconflicts):
1053 for f, p in _filesindirs(repo, mf, remoteconflicts):
1024 if f not in deletedfiles:
1054 if f not in deletedfiles:
1025 m, args, msg = actions[p]
1055 m, args, msg = actions[p]
1026 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1056 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1027 if m in ('dc', 'm'):
1057 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1028 # Action was merge, just update target.
1058 # Action was merge, just update target.
1029 actions[pnew] = (m, args, msg)
1059 actions[pnew] = (m, args, msg)
1030 else:
1060 else:
1031 # Action was create, change to renamed get action.
1061 # Action was create, change to renamed get action.
1032 fl = args[0]
1062 fl = args[0]
1033 actions[pnew] = ('dg', (p, fl), "remote path conflict")
1063 actions[pnew] = (ACTION_LOCAL_DIR_RENAME_GET, (p, fl),
1064 'remote path conflict')
1034 actions[p] = ('p', (pnew, 'r'), "path conflict")
1065 actions[p] = (ACTION_PATH_CONFLICT, (pnew, ACTION_REMOVE),
1066 'path conflict')
1035 remoteconflicts.remove(p)
1067 remoteconflicts.remove(p)
1036 break
1068 break
1037
1069
1038 if invalidconflicts:
1070 if invalidconflicts:
1039 for p in invalidconflicts:
1071 for p in invalidconflicts:
1040 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1072 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1041 raise error.Abort(_("destination manifest contains path conflicts"))
1073 raise error.Abort(_("destination manifest contains path conflicts"))
1042
1074
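The core test in checkpathconflicts is whether a newly created file aliases an existing directory, or sits under a directory name that is an existing file. A rough, self-contained sketch of that classification (renaming and deletion handling omitted; all names are illustrative):

def classify_path_conflicts(created, local_files):
    """Split created paths into local conflicts (a parent directory name is
    an existing local file) and remote conflicts (the new file name is an
    existing local directory)."""
    localdirs = set()
    for f in local_files:
        parts = f.split('/')
        for i in range(1, len(parts)):
            localdirs.add('/'.join(parts[:i]))
    localconflicts, remoteconflicts = set(), set()
    for f in created:
        if f in localdirs:
            remoteconflicts.add(f)            # new file vs. existing directory
        parts = f.split('/')
        for i in range(1, len(parts)):
            p = '/'.join(parts[:i])
            if p in local_files:
                localconflicts.add(p)         # existing file vs. new directory
    return localconflicts, remoteconflicts

# classify_path_conflicts({'a/b'}, {'a'})  -> ({'a'}, set())
# classify_path_conflicts({'a'}, {'a/b'})  -> (set(), {'a'})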
1043 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1075 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1044 acceptremote, followcopies, forcefulldiff=False):
1076 acceptremote, followcopies, forcefulldiff=False):
1045 """
1077 """
1046 Merge wctx and p2 with ancestor pa and generate merge action list
1078 Merge wctx and p2 with ancestor pa and generate merge action list
1047
1079
1048 branchmerge and force are as passed in to update
1080 branchmerge and force are as passed in to update
1049 matcher = matcher to filter file lists
1081 matcher = matcher to filter file lists
1050 acceptremote = accept the incoming changes without prompting
1082 acceptremote = accept the incoming changes without prompting
1051 """
1083 """
1052 if matcher is not None and matcher.always():
1084 if matcher is not None and matcher.always():
1053 matcher = None
1085 matcher = None
1054
1086
1055 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1087 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1056
1088
1057 # manifests fetched in order are going to be faster, so prime the caches
1089 # manifests fetched in order are going to be faster, so prime the caches
1058 [x.manifest() for x in
1090 [x.manifest() for x in
1059 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1091 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1060
1092
1061 if followcopies:
1093 if followcopies:
1062 ret = copies.mergecopies(repo, wctx, p2, pa)
1094 ret = copies.mergecopies(repo, wctx, p2, pa)
1063 copy, movewithdir, diverge, renamedelete, dirmove = ret
1095 copy, movewithdir, diverge, renamedelete, dirmove = ret
1064
1096
1065 boolbm = pycompat.bytestr(bool(branchmerge))
1097 boolbm = pycompat.bytestr(bool(branchmerge))
1066 boolf = pycompat.bytestr(bool(force))
1098 boolf = pycompat.bytestr(bool(force))
1067 boolm = pycompat.bytestr(bool(matcher))
1099 boolm = pycompat.bytestr(bool(matcher))
1068 repo.ui.note(_("resolving manifests\n"))
1100 repo.ui.note(_("resolving manifests\n"))
1069 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1101 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1070 % (boolbm, boolf, boolm))
1102 % (boolbm, boolf, boolm))
1071 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1103 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1072
1104
1073 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1105 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1074 copied = set(copy.values())
1106 copied = set(copy.values())
1075 copied.update(movewithdir.values())
1107 copied.update(movewithdir.values())
1076
1108
1077 if '.hgsubstate' in m1:
1109 if '.hgsubstate' in m1:
1078 # check whether sub state is modified
1110 # check whether sub state is modified
1079 if any(wctx.sub(s).dirty() for s in wctx.substate):
1111 if any(wctx.sub(s).dirty() for s in wctx.substate):
1080 m1['.hgsubstate'] = modifiednodeid
1112 m1['.hgsubstate'] = modifiednodeid
1081
1113
1082 # Don't use m2-vs-ma optimization if:
1114 # Don't use m2-vs-ma optimization if:
1083 # - ma is the same as m1 or m2, which we're just going to diff again later
1115 # - ma is the same as m1 or m2, which we're just going to diff again later
1084 # - The caller specifically asks for a full diff, which is useful during bid
1116 # - The caller specifically asks for a full diff, which is useful during bid
1085 # merge.
1117 # merge.
1086 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1118 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1087 # Identify which files are relevant to the merge, so we can limit the
1119 # Identify which files are relevant to the merge, so we can limit the
1088 # total m1-vs-m2 diff to just those files. This has significant
1120 # total m1-vs-m2 diff to just those files. This has significant
1089 # performance benefits in large repositories.
1121 # performance benefits in large repositories.
1090 relevantfiles = set(ma.diff(m2).keys())
1122 relevantfiles = set(ma.diff(m2).keys())
1091
1123
1092 # For copied and moved files, we need to add the source file too.
1124 # For copied and moved files, we need to add the source file too.
1093 for copykey, copyvalue in copy.iteritems():
1125 for copykey, copyvalue in copy.iteritems():
1094 if copyvalue in relevantfiles:
1126 if copyvalue in relevantfiles:
1095 relevantfiles.add(copykey)
1127 relevantfiles.add(copykey)
1096 for movedirkey in movewithdir:
1128 for movedirkey in movewithdir:
1097 relevantfiles.add(movedirkey)
1129 relevantfiles.add(movedirkey)
1098 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1130 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1099 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1131 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1100
1132
1101 diff = m1.diff(m2, match=matcher)
1133 diff = m1.diff(m2, match=matcher)
1102
1134
1103 if matcher is None:
1135 if matcher is None:
1104 matcher = matchmod.always('', '')
1136 matcher = matchmod.always('', '')
1105
1137
1106 actions = {}
1138 actions = {}
1107 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1139 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1108 if n1 and n2: # file exists on both local and remote side
1140 if n1 and n2: # file exists on both local and remote side
1109 if f not in ma:
1141 if f not in ma:
1110 fa = copy.get(f, None)
1142 fa = copy.get(f, None)
1111 if fa is not None:
1143 if fa is not None:
1112 actions[f] = ('m', (f, f, fa, False, pa.node()),
1144 actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()),
1113 "both renamed from " + fa)
1145 'both renamed from %s' % fa)
1114 else:
1146 else:
1115 actions[f] = ('m', (f, f, None, False, pa.node()),
1147 actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()),
1116 "both created")
1148 'both created')
1117 else:
1149 else:
1118 a = ma[f]
1150 a = ma[f]
1119 fla = ma.flags(f)
1151 fla = ma.flags(f)
1120 nol = 'l' not in fl1 + fl2 + fla
1152 nol = 'l' not in fl1 + fl2 + fla
1121 if n2 == a and fl2 == fla:
1153 if n2 == a and fl2 == fla:
1122 actions[f] = ('k', (), "remote unchanged")
1154 actions[f] = (ACTION_KEEP, (), 'remote unchanged')
1123 elif n1 == a and fl1 == fla: # local unchanged - use remote
1155 elif n1 == a and fl1 == fla: # local unchanged - use remote
1124 if n1 == n2: # optimization: keep local content
1156 if n1 == n2: # optimization: keep local content
1125 actions[f] = ('e', (fl2,), "update permissions")
1157 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1126 else:
1158 else:
1127 actions[f] = ('g', (fl2, False), "remote is newer")
1159 actions[f] = (ACTION_GET, (fl2, False),
1160 'remote is newer')
1128 elif nol and n2 == a: # remote only changed 'x'
1161 elif nol and n2 == a: # remote only changed 'x'
1129 actions[f] = ('e', (fl2,), "update permissions")
1162 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1130 elif nol and n1 == a: # local only changed 'x'
1163 elif nol and n1 == a: # local only changed 'x'
1131 actions[f] = ('g', (fl1, False), "remote is newer")
1164 actions[f] = (ACTION_GET, (fl1, False), 'remote is newer')
1132 else: # both changed something
1165 else: # both changed something
1133 actions[f] = ('m', (f, f, f, False, pa.node()),
1166 actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()),
1134 "versions differ")
1167 'versions differ')
1135 elif n1: # file exists only on local side
1168 elif n1: # file exists only on local side
1136 if f in copied:
1169 if f in copied:
1137 pass # we'll deal with it on m2 side
1170 pass # we'll deal with it on m2 side
1138 elif f in movewithdir: # directory rename, move local
1171 elif f in movewithdir: # directory rename, move local
1139 f2 = movewithdir[f]
1172 f2 = movewithdir[f]
1140 if f2 in m2:
1173 if f2 in m2:
1141 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1174 actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()),
1142 "remote directory rename, both created")
1175 'remote directory rename, both created')
1143 else:
1176 else:
1144 actions[f2] = ('dm', (f, fl1),
1177 actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1),
1145 "remote directory rename - move from " + f)
1178 'remote directory rename - move from %s' % f)
1146 elif f in copy:
1179 elif f in copy:
1147 f2 = copy[f]
1180 f2 = copy[f]
1148 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1181 actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()),
1149 "local copied/moved from " + f2)
1182 'local copied/moved from %s' % f2)
1150 elif f in ma: # clean, a different, no remote
1183 elif f in ma: # clean, a different, no remote
1151 if n1 != ma[f]:
1184 if n1 != ma[f]:
1152 if acceptremote:
1185 if acceptremote:
1153 actions[f] = ('r', None, "remote delete")
1186 actions[f] = (ACTION_REMOVE, None, 'remote delete')
1154 else:
1187 else:
1155 actions[f] = ('cd', (f, None, f, False, pa.node()),
1156 "prompt changed/deleted")
1188 actions[f] = (ACTION_CHANGED_DELETED,
1189 (f, None, f, False, pa.node()),
1190 'prompt changed/deleted')
1157 elif n1 == addednodeid:
1191 elif n1 == addednodeid:
1158 # This extra 'a' is added by working copy manifest to mark
1192 # This extra 'a' is added by working copy manifest to mark
1159 # the file as locally added. We should forget it instead of
1193 # the file as locally added. We should forget it instead of
1160 # deleting it.
1194 # deleting it.
1161 actions[f] = ('f', None, "remote deleted")
1195 actions[f] = (ACTION_FORGET, None, 'remote deleted')
1162 else:
1196 else:
1163 actions[f] = ('r', None, "other deleted")
1197 actions[f] = (ACTION_REMOVE, None, 'other deleted')
1164 elif n2: # file exists only on remote side
1198 elif n2: # file exists only on remote side
1165 if f in copied:
1199 if f in copied:
1166 pass # we'll deal with it on m1 side
1200 pass # we'll deal with it on m1 side
1167 elif f in movewithdir:
1201 elif f in movewithdir:
1168 f2 = movewithdir[f]
1202 f2 = movewithdir[f]
1169 if f2 in m1:
1203 if f2 in m1:
1170 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1171 "local directory rename, both created")
1204 actions[f2] = (ACTION_MERGE,
1205 (f2, f, None, False, pa.node()),
1206 'local directory rename, both created')
1172 else:
1207 else:
1173 actions[f2] = ('dg', (f, fl2),
1208 actions[f2] = (ACTION_LOCAL_DIR_RENAME_GET, (f, fl2),
1174 "local directory rename - get from " + f)
1209 'local directory rename - get from %s' % f)
1175 elif f in copy:
1210 elif f in copy:
1176 f2 = copy[f]
1211 f2 = copy[f]
1177 if f2 in m2:
1212 if f2 in m2:
1178 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1213 actions[f] = (ACTION_MERGE, (f2, f, f2, False, pa.node()),
1179 "remote copied from " + f2)
1214 'remote copied from %s' % f2)
1180 else:
1215 else:
1181 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1216 actions[f] = (ACTION_MERGE, (f2, f, f2, True, pa.node()),
1182 "remote moved from " + f2)
1217 'remote moved from %s' % f2)
1183 elif f not in ma:
1218 elif f not in ma:
1184 # local unknown, remote created: the logic is described by the
1219 # local unknown, remote created: the logic is described by the
1185 # following table:
1220 # following table:
1186 #
1221 #
1187 # force branchmerge different | action
1222 # force branchmerge different | action
1188 # n * * | create
1223 # n * * | create
1189 # y n * | create
1224 # y n * | create
1190 # y y n | create
1225 # y y n | create
1191 # y y y | merge
1226 # y y y | merge
1192 #
1227 #
1193 # Checking whether the files are different is expensive, so we
1228 # Checking whether the files are different is expensive, so we
1194 # don't do that when we can avoid it.
1229 # don't do that when we can avoid it.
1195 if not force:
1230 if not force:
1196 actions[f] = ('c', (fl2,), "remote created")
1231 actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
1197 elif not branchmerge:
1232 elif not branchmerge:
1198 actions[f] = ('c', (fl2,), "remote created")
1233 actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
1199 else:
1234 else:
1200 actions[f] = ('cm', (fl2, pa.node()),
1235 actions[f] = (ACTION_CREATED_MERGE, (fl2, pa.node()),
1201 "remote created, get or merge")
1236 'remote created, get or merge')
1202 elif n2 != ma[f]:
1237 elif n2 != ma[f]:
1203 df = None
1238 df = None
1204 for d in dirmove:
1239 for d in dirmove:
1205 if f.startswith(d):
1240 if f.startswith(d):
1206 # new file added in a directory that was moved
1241 # new file added in a directory that was moved
1207 df = dirmove[d] + f[len(d):]
1242 df = dirmove[d] + f[len(d):]
1208 break
1243 break
1209 if df is not None and df in m1:
1244 if df is not None and df in m1:
1210 actions[df] = ('m', (df, f, f, False, pa.node()),
1211 "local directory rename - respect move from " + f)
1245 actions[df] = (ACTION_MERGE, (df, f, f, False, pa.node()),
1246 'local directory rename - respect move '
1247 'from %s' % f)
1212 elif acceptremote:
1248 elif acceptremote:
1213 actions[f] = ('c', (fl2,), "remote recreating")
1249 actions[f] = (ACTION_CREATED, (fl2,), 'remote recreating')
1214 else:
1250 else:
1215 actions[f] = ('dc', (None, f, f, False, pa.node()),
1216 "prompt deleted/changed")
1251 actions[f] = (ACTION_DELETED_CHANGED,
1252 (None, f, f, False, pa.node()),
1253 'prompt deleted/changed')
1217
1254
1218 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1255 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1219 # If we are merging, look for path conflicts.
1256 # If we are merging, look for path conflicts.
1220 checkpathconflicts(repo, wctx, p2, actions)
1257 checkpathconflicts(repo, wctx, p2, actions)
1221
1258
1222 return actions, diverge, renamedelete
1259 return actions, diverge, renamedelete
1223
1260
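Stripped of copy tracing, flags and prompting, the per-file decision in manifestmerge is a three-way comparison of local node n1, remote node n2 and ancestor node a. A heavily simplified sketch over plain dicts, assuming single-letter string codes as stand-ins for the action constants:

ACTION_GET, ACTION_KEEP, ACTION_MERGE, ACTION_REMOVE = 'g', 'k', 'm', 'r'
ACTION_CREATED, ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED = 'c', 'cd', 'dc'

def simple_manifestmerge(m1, m2, ma):
    """m1/m2/ma map filename -> content hash for local, remote, ancestor."""
    actions = {}
    for f in set(m1) | set(m2):
        n1, n2, a = m1.get(f), m2.get(f), ma.get(f)
        if n1 and n2:                       # exists on both sides
            if n2 == a:
                actions[f] = (ACTION_KEEP, 'remote unchanged')
            elif n1 == a:
                actions[f] = (ACTION_GET, 'remote is newer')
            else:
                actions[f] = (ACTION_MERGE, 'versions differ')
        elif n1 and a:                      # local only, existed in ancestor
            if n1 != a:
                actions[f] = (ACTION_CHANGED_DELETED, 'prompt changed/deleted')
            else:
                actions[f] = (ACTION_REMOVE, 'other deleted')
        elif n2:                            # remote only
            if a is None:
                actions[f] = (ACTION_CREATED, 'remote created')
            elif n2 != a:
                actions[f] = (ACTION_DELETED_CHANGED, 'prompt deleted/changed')
    return actions

# simple_manifestmerge({'f': 1}, {'f': 2}, {'f': 1})
# -> {'f': ('g', 'remote is newer')}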
1224 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1261 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1225 """Resolves false conflicts where the nodeid changed but the content
1262 """Resolves false conflicts where the nodeid changed but the content
1226 remained the same."""
1263 remained the same."""
1227 # We force a copy of actions.items() because we're going to mutate
1264 # We force a copy of actions.items() because we're going to mutate
1228 # actions as we resolve trivial conflicts.
1265 # actions as we resolve trivial conflicts.
1229 for f, (m, args, msg) in list(actions.items()):
1266 for f, (m, args, msg) in list(actions.items()):
1230 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1267 if (m == ACTION_CHANGED_DELETED and f in ancestor
1268 and not wctx[f].cmp(ancestor[f])):
1231 # local did change but ended up with same content
1269 # local did change but ended up with same content
1232 actions[f] = 'r', None, "prompt same"
1270 actions[f] = ACTION_REMOVE, None, 'prompt same'
1233 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1271 elif (m == ACTION_DELETED_CHANGED and f in ancestor
1272 and not mctx[f].cmp(ancestor[f])):
1234 # remote did change but ended up with same content
1273 # remote did change but ended up with same content
1235 del actions[f] # don't get = keep local deleted
1274 del actions[f] # don't get = keep local deleted
1236
1275
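A tiny sketch of the false-conflict filter above: when a side changed the nodeid but the content still matches the ancestor, the cd/dc prompt is downgraded. Contents here are plain strings and equality stands in for cmp(); all names are illustrative:

ACTION_REMOVE, ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED = 'r', 'cd', 'dc'

def resolve_trivial(actions, local, remote, ancestor):
    """local/remote/ancestor map filename -> file content (or are missing
    the key if the file was deleted on that side)."""
    for f, (m, msg) in list(actions.items()):
        if (m == ACTION_CHANGED_DELETED and f in ancestor
            and local.get(f) == ancestor[f]):
            actions[f] = (ACTION_REMOVE, 'prompt same')   # local change was a no-op
        elif (m == ACTION_DELETED_CHANGED and f in ancestor
              and remote.get(f) == ancestor[f]):
            del actions[f]                                # keep the local deletion

# acts = {'f': ('cd', 'prompt changed/deleted')}
# resolve_trivial(acts, {'f': 'x'}, {}, {'f': 'x'})
# acts -> {'f': ('r', 'prompt same')}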
1237 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1276 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1238 acceptremote, followcopies, matcher=None,
1277 acceptremote, followcopies, matcher=None,
1239 mergeforce=False):
1278 mergeforce=False):
1240 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1279 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1241 # Avoid cycle.
1280 # Avoid cycle.
1242 from . import sparse
1281 from . import sparse
1243
1282
1244 if len(ancestors) == 1: # default
1283 if len(ancestors) == 1: # default
1245 actions, diverge, renamedelete = manifestmerge(
1284 actions, diverge, renamedelete = manifestmerge(
1246 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1285 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1247 acceptremote, followcopies)
1286 acceptremote, followcopies)
1248 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1287 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1249
1288
1250 else: # only when merge.preferancestor=* - the default
1289 else: # only when merge.preferancestor=* - the default
1251 repo.ui.note(
1290 repo.ui.note(
1252 _("note: merging %s and %s using bids from ancestors %s\n") %
1291 _("note: merging %s and %s using bids from ancestors %s\n") %
1253 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1292 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1254 for anc in ancestors)))
1293 for anc in ancestors)))
1255
1294
1256 # Call for bids
1295 # Call for bids
1257 fbids = {} # mapping filename to bids (action method to list of actions)
1296 fbids = {} # mapping filename to bids (action method to list of actions)
1258 diverge, renamedelete = None, None
1297 diverge, renamedelete = None, None
1259 for ancestor in ancestors:
1298 for ancestor in ancestors:
1260 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1299 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1261 actions, diverge1, renamedelete1 = manifestmerge(
1300 actions, diverge1, renamedelete1 = manifestmerge(
1262 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1301 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1263 acceptremote, followcopies, forcefulldiff=True)
1302 acceptremote, followcopies, forcefulldiff=True)
1264 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1303 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1265
1304
1266 # Track the shortest set of warnings on the theory that bid
1305 # Track the shortest set of warnings on the theory that bid
1267 # merge will correctly incorporate more information
1306 # merge will correctly incorporate more information
1268 if diverge is None or len(diverge1) < len(diverge):
1307 if diverge is None or len(diverge1) < len(diverge):
1269 diverge = diverge1
1308 diverge = diverge1
1270 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1309 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1271 renamedelete = renamedelete1
1310 renamedelete = renamedelete1
1272
1311
1273 for f, a in sorted(actions.iteritems()):
1312 for f, a in sorted(actions.iteritems()):
1274 m, args, msg = a
1313 m, args, msg = a
1275 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1314 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1276 if f in fbids:
1315 if f in fbids:
1277 d = fbids[f]
1316 d = fbids[f]
1278 if m in d:
1317 if m in d:
1279 d[m].append(a)
1318 d[m].append(a)
1280 else:
1319 else:
1281 d[m] = [a]
1320 d[m] = [a]
1282 else:
1321 else:
1283 fbids[f] = {m: [a]}
1322 fbids[f] = {m: [a]}
1284
1323
1285 # Pick the best bid for each file
1324 # Pick the best bid for each file
1286 repo.ui.note(_('\nauction for merging merge bids\n'))
1325 repo.ui.note(_('\nauction for merging merge bids\n'))
1287 actions = {}
1326 actions = {}
1288 dms = [] # filenames that have dm actions
1327 dms = [] # filenames that have dm actions
1289 for f, bids in sorted(fbids.items()):
1328 for f, bids in sorted(fbids.items()):
1290 # bids is a mapping from action method to list of actions
1329 # bids is a mapping from action method to list of actions
1291 # Consensus?
1330 # Consensus?
1292 if len(bids) == 1: # all bids are the same kind of method
1331 if len(bids) == 1: # all bids are the same kind of method
1293 m, l = list(bids.items())[0]
1332 m, l = list(bids.items())[0]
1294 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1333 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1295 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1334 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1296 actions[f] = l[0]
1335 actions[f] = l[0]
1297 if m == 'dm':
1336 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1298 dms.append(f)
1337 dms.append(f)
1299 continue
1338 continue
1300 # If keep is an option, just do it.
1339 # If keep is an option, just do it.
1301 if 'k' in bids:
1340 if ACTION_KEEP in bids:
1302 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1341 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1303 actions[f] = bids['k'][0]
1342 actions[f] = bids[ACTION_KEEP][0]
1304 continue
1343 continue
1305 # If there are gets and they all agree [how could they not?], do it.
1344 # If there are gets and they all agree [how could they not?], do it.
1306 if 'g' in bids:
1345 if ACTION_GET in bids:
1307 ga0 = bids['g'][0]
1346 ga0 = bids[ACTION_GET][0]
1308 if all(a == ga0 for a in bids['g'][1:]):
1347 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1309 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1348 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1310 actions[f] = ga0
1349 actions[f] = ga0
1311 continue
1350 continue
1312 # TODO: Consider other simple actions such as mode changes
1351 # TODO: Consider other simple actions such as mode changes
1313 # Handle inefficient democrazy.
1352 # Handle inefficient democrazy.
1314 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1353 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1315 for m, l in sorted(bids.items()):
1354 for m, l in sorted(bids.items()):
1316 for _f, args, msg in l:
1355 for _f, args, msg in l:
1317 repo.ui.note(' %s -> %s\n' % (msg, m))
1356 repo.ui.note(' %s -> %s\n' % (msg, m))
1318 # Pick random action. TODO: Instead, prompt user when resolving
1357 # Pick random action. TODO: Instead, prompt user when resolving
1319 m, l = list(bids.items())[0]
1358 m, l = list(bids.items())[0]
1320 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1359 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1321 (f, m))
1360 (f, m))
1322 actions[f] = l[0]
1361 actions[f] = l[0]
1323 if m == 'dm':
1362 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1324 dms.append(f)
1363 dms.append(f)
1325 continue
1364 continue
1326 # Work around 'dm' that can cause multiple actions for the same file
1365 # Work around 'dm' that can cause multiple actions for the same file
1327 for f in dms:
1366 for f in dms:
1328 dm, (f0, flags), msg = actions[f]
1367 dm, (f0, flags), msg = actions[f]
1329 assert dm == 'dm', dm
1368 assert dm == ACTION_DIR_RENAME_MOVE_LOCAL, dm
1330 if f0 in actions and actions[f0][0] == 'r':
1369 if f0 in actions and actions[f0][0] == ACTION_REMOVE:
1331 # We have one bid for removing a file and another for moving it.
1370 # We have one bid for removing a file and another for moving it.
1332 # These two could be merged as first move and then delete ...
1371 # These two could be merged as first move and then delete ...
1333 # but instead drop moving and just delete.
1372 # but instead drop moving and just delete.
1334 del actions[f]
1373 del actions[f]
1335 repo.ui.note(_('end of auction\n\n'))
1374 repo.ui.note(_('end of auction\n\n'))
1336
1375
1337 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1376 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1338
1377
1339 if wctx.rev() is None:
1378 if wctx.rev() is None:
1340 fractions = _forgetremoved(wctx, mctx, branchmerge)
1379 fractions = _forgetremoved(wctx, mctx, branchmerge)
1341 actions.update(fractions)
1380 actions.update(fractions)
1342
1381
1343 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1382 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1344 actions)
1383 actions)
1345
1384
1346 return prunedactions, diverge, renamedelete
1385 return prunedactions, diverge, renamedelete
1347
1386
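The bid-merge auction boils down to: gather one bid per ancestor per file, then prefer consensus, then a 'keep' bid, then an agreed 'get' bid, and otherwise pick arbitrarily. A compact sketch over plain tuples (no ui output and no dm work-around; the names are stand-ins):

ACTION_KEEP, ACTION_GET = 'k', 'g'

def auction(fbids):
    """fbids: filename -> {action_kind: [bid, ...]}; each bid is a tuple
    (action_kind, args, message). Returns the chosen bid per file."""
    chosen = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                        # consensus on the kind
            chosen[f] = next(iter(bids.values()))[0]
        elif ACTION_KEEP in bids:                 # keeping is always safe
            chosen[f] = bids[ACTION_KEEP][0]
        elif ACTION_GET in bids and len(set(bids[ACTION_GET])) == 1:
            chosen[f] = bids[ACTION_GET][0]       # all 'get' bids agree
        else:                                     # ambiguous: pick one arbitrarily
            chosen[f] = next(iter(bids.values()))[0]
    return chosen

# auction({'f': {'k': [('k', (), 'remote unchanged')],
#                'g': [('g', ('', False), 'remote is newer')]}})
# -> {'f': ('k', (), 'remote unchanged')}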
1348 def _getcwd():
1387 def _getcwd():
1349 try:
1388 try:
1350 return pycompat.getcwd()
1389 return pycompat.getcwd()
1351 except OSError as err:
1390 except OSError as err:
1352 if err.errno == errno.ENOENT:
1391 if err.errno == errno.ENOENT:
1353 return None
1392 return None
1354 raise
1393 raise
1355
1394
1356 def batchremove(repo, wctx, actions):
1395 def batchremove(repo, wctx, actions):
1357 """apply removes to the working directory
1396 """apply removes to the working directory
1358
1397
1359 yields tuples for progress updates
1398 yields tuples for progress updates
1360 """
1399 """
1361 verbose = repo.ui.verbose
1400 verbose = repo.ui.verbose
1362 cwd = _getcwd()
1401 cwd = _getcwd()
1363 i = 0
1402 i = 0
1364 for f, args, msg in actions:
1403 for f, args, msg in actions:
1365 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1404 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1366 if verbose:
1405 if verbose:
1367 repo.ui.note(_("removing %s\n") % f)
1406 repo.ui.note(_("removing %s\n") % f)
1368 wctx[f].audit()
1407 wctx[f].audit()
1369 try:
1408 try:
1370 wctx[f].remove(ignoremissing=True)
1409 wctx[f].remove(ignoremissing=True)
1371 except OSError as inst:
1410 except OSError as inst:
1372 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1411 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1373 (f, inst.strerror))
1412 (f, inst.strerror))
1374 if i == 100:
1413 if i == 100:
1375 yield i, f
1414 yield i, f
1376 i = 0
1415 i = 0
1377 i += 1
1416 i += 1
1378 if i > 0:
1417 if i > 0:
1379 yield i, f
1418 yield i, f
1380
1419
1381 if cwd and not _getcwd():
1420 if cwd and not _getcwd():
1382 # cwd was removed in the course of removing files; print a helpful
1421 # cwd was removed in the course of removing files; print a helpful
1383 # warning.
1422 # warning.
1384 repo.ui.warn(_("current directory was removed\n"
1423 repo.ui.warn(_("current directory was removed\n"
1385 "(consider changing to repo root: %s)\n") % repo.root)
1424 "(consider changing to repo root: %s)\n") % repo.root)
1386
1425
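batchremove and batchget share the same chunked progress pattern: report roughly every 100 files rather than once per file, so the worker pipe is not flooded with tiny updates. The counting logic in isolation (process is a placeholder callback, not part of Mercurial):

def batched_progress(items, process, chunk=100):
    """Apply process() to every item, yielding (count, last_item) roughly
    every `chunk` items, plus once for the remainder."""
    i = 0
    item = None
    for item in items:
        process(item)
        if i == chunk:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item

# sum(n for n, _ in batched_progress(range(250), lambda x: None)) == 250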
1387 def batchget(repo, mctx, wctx, actions):
1426 def batchget(repo, mctx, wctx, actions):
1388 """apply gets to the working directory
1427 """apply gets to the working directory
1389
1428
1390 mctx is the context to get from
1429 mctx is the context to get from
1391
1430
1392 yields tuples for progress updates
1431 yields tuples for progress updates
1393 """
1432 """
1394 verbose = repo.ui.verbose
1433 verbose = repo.ui.verbose
1395 fctx = mctx.filectx
1434 fctx = mctx.filectx
1396 ui = repo.ui
1435 ui = repo.ui
1397 i = 0
1436 i = 0
1398 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1437 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1399 for f, (flags, backup), msg in actions:
1438 for f, (flags, backup), msg in actions:
1400 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1439 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1401 if verbose:
1440 if verbose:
1402 repo.ui.note(_("getting %s\n") % f)
1441 repo.ui.note(_("getting %s\n") % f)
1403
1442
1404 if backup:
1443 if backup:
1405 # If a file or directory exists with the same name, back that
1444 # If a file or directory exists with the same name, back that
1406 # up. Otherwise, look to see if there is a file that conflicts
1445 # up. Otherwise, look to see if there is a file that conflicts
1407 # with a directory this file is in, and if so, back that up.
1446 # with a directory this file is in, and if so, back that up.
1408 absf = repo.wjoin(f)
1447 absf = repo.wjoin(f)
1409 if not repo.wvfs.lexists(f):
1448 if not repo.wvfs.lexists(f):
1410 for p in util.finddirs(f):
1449 for p in util.finddirs(f):
1411 if repo.wvfs.isfileorlink(p):
1450 if repo.wvfs.isfileorlink(p):
1412 absf = repo.wjoin(p)
1451 absf = repo.wjoin(p)
1413 break
1452 break
1414 orig = scmutil.origpath(ui, repo, absf)
1453 orig = scmutil.origpath(ui, repo, absf)
1415 if repo.wvfs.lexists(absf):
1454 if repo.wvfs.lexists(absf):
1416 util.rename(absf, orig)
1455 util.rename(absf, orig)
1417 wctx[f].clearunknown()
1456 wctx[f].clearunknown()
1418 atomictemp = ui.configbool("experimental", "update.atomic-file")
1457 atomictemp = ui.configbool("experimental", "update.atomic-file")
1419 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1458 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1420 atomictemp=atomictemp)
1459 atomictemp=atomictemp)
1421 if i == 100:
1460 if i == 100:
1422 yield i, f
1461 yield i, f
1423 i = 0
1462 i = 0
1424 i += 1
1463 i += 1
1425 if i > 0:
1464 if i > 0:
1426 yield i, f
1465 yield i, f
1427
1466
1428 def _prefetchfiles(repo, ctx, actions):
1467 def _prefetchfiles(repo, ctx, actions):
1429 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1468 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1430 of merge actions. ``ctx`` is the context being merged in."""
1469 of merge actions. ``ctx`` is the context being merged in."""
1431
1470
1432 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1471 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1433 # don't touch the context to be merged in. 'cd' is skipped, because
1472 # don't touch the context to be merged in. 'cd' is skipped, because
1434 # changed/deleted never resolves to something from the remote side.
1473 # changed/deleted never resolves to something from the remote side.
1435 oplist = [actions[a] for a in 'g dc dg m'.split()]
1474 oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED,
1475 ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)]
1436 prefetch = scmutil.fileprefetchhooks
1476 prefetch = scmutil.fileprefetchhooks
1437 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1477 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1438
1478
1439 @attr.s(frozen=True)
1479 @attr.s(frozen=True)
1440 class updateresult(object):
1480 class updateresult(object):
1441 updatedcount = attr.ib()
1481 updatedcount = attr.ib()
1442 mergedcount = attr.ib()
1482 mergedcount = attr.ib()
1443 removedcount = attr.ib()
1483 removedcount = attr.ib()
1444 unresolvedcount = attr.ib()
1484 unresolvedcount = attr.ib()
1445
1485
1446 # TODO remove container emulation once consumers switch to new API.
1486 # TODO remove container emulation once consumers switch to new API.
1447
1487
1448 def __getitem__(self, x):
1488 def __getitem__(self, x):
1449 if x == 0:
1489 if x == 0:
1450 return self.updatedcount
1490 return self.updatedcount
1451 elif x == 1:
1491 elif x == 1:
1452 return self.mergedcount
1492 return self.mergedcount
1453 elif x == 2:
1493 elif x == 2:
1454 return self.removedcount
1494 return self.removedcount
1455 elif x == 3:
1495 elif x == 3:
1456 return self.unresolvedcount
1496 return self.unresolvedcount
1457 else:
1497 else:
1458 raise IndexError('can only access items 0-3')
1498 raise IndexError('can only access items 0-3')
1459
1499
1460 def __len__(self):
1500 def __len__(self):
1461 return 4
1501 return 4
1462
1502
1463 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1503 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1464 """apply the merge action list to the working directory
1504 """apply the merge action list to the working directory
1465
1505
1466 wctx is the working copy context
1506 wctx is the working copy context
1467 mctx is the context to be merged into the working copy
1507 mctx is the context to be merged into the working copy
1468
1508
1469 Return a tuple of counts (updated, merged, removed, unresolved) that
1509 Return a tuple of counts (updated, merged, removed, unresolved) that
1470 describes how many files were affected by the update.
1510 describes how many files were affected by the update.
1471 """
1511 """
1472
1512
1473 _prefetchfiles(repo, mctx, actions)
1513 _prefetchfiles(repo, mctx, actions)
1474
1514
1475 updated, merged, removed = 0, 0, 0
1515 updated, merged, removed = 0, 0, 0
1476 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1516 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1477 moves = []
1517 moves = []
1478 for m, l in actions.items():
1518 for m, l in actions.items():
1479 l.sort()
1519 l.sort()
1480
1520
1481 # 'cd' and 'dc' actions are treated like other merge conflicts
1521 # 'cd' and 'dc' actions are treated like other merge conflicts
1482 mergeactions = sorted(actions['cd'])
1522 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1483 mergeactions.extend(sorted(actions['dc']))
1523 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1484 mergeactions.extend(actions['m'])
1524 mergeactions.extend(actions[ACTION_MERGE])
1485 for f, args, msg in mergeactions:
1525 for f, args, msg in mergeactions:
1486 f1, f2, fa, move, anc = args
1526 f1, f2, fa, move, anc = args
1487 if f == '.hgsubstate': # merged internally
1527 if f == '.hgsubstate': # merged internally
1488 continue
1528 continue
1489 if f1 is None:
1529 if f1 is None:
1490 fcl = filemerge.absentfilectx(wctx, fa)
1530 fcl = filemerge.absentfilectx(wctx, fa)
1491 else:
1531 else:
1492 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1532 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1493 fcl = wctx[f1]
1533 fcl = wctx[f1]
1494 if f2 is None:
1534 if f2 is None:
1495 fco = filemerge.absentfilectx(mctx, fa)
1535 fco = filemerge.absentfilectx(mctx, fa)
1496 else:
1536 else:
1497 fco = mctx[f2]
1537 fco = mctx[f2]
1498 actx = repo[anc]
1538 actx = repo[anc]
1499 if fa in actx:
1539 if fa in actx:
1500 fca = actx[fa]
1540 fca = actx[fa]
1501 else:
1541 else:
1502 # TODO: move to absentfilectx
1542 # TODO: move to absentfilectx
1503 fca = repo.filectx(f1, fileid=nullrev)
1543 fca = repo.filectx(f1, fileid=nullrev)
1504 ms.add(fcl, fco, fca, f)
1544 ms.add(fcl, fco, fca, f)
1505 if f1 != f and move:
1545 if f1 != f and move:
1506 moves.append(f1)
1546 moves.append(f1)
1507
1547
1508 _updating = _('updating')
1548 _updating = _('updating')
1509 _files = _('files')
1549 _files = _('files')
1510 progress = repo.ui.progress
1550 progress = repo.ui.progress
1511
1551
1512 # remove renamed files after safely stored
1552 # remove renamed files after safely stored
1513 for f in moves:
1553 for f in moves:
1514 if wctx[f].lexists():
1554 if wctx[f].lexists():
1515 repo.ui.debug("removing %s\n" % f)
1555 repo.ui.debug("removing %s\n" % f)
1516 wctx[f].audit()
1556 wctx[f].audit()
1517 wctx[f].remove()
1557 wctx[f].remove()
1518
1558
1519 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1559 numupdates = sum(len(l) for m, l in actions.items()
1560 if m != ACTION_KEEP)
1520 z = 0
1561 z = 0
1521
1562
1522 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1563 if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']:
1523 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1564 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1524
1565
1525 # record path conflicts
1566 # record path conflicts
1526 for f, args, msg in actions['p']:
1567 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1527 f1, fo = args
1568 f1, fo = args
1528 s = repo.ui.status
1569 s = repo.ui.status
1529 s(_("%s: path conflict - a file or link has the same name as a "
1570 s(_("%s: path conflict - a file or link has the same name as a "
1530 "directory\n") % f)
1571 "directory\n") % f)
1531 if fo == 'l':
1572 if fo == 'l':
1532 s(_("the local file has been renamed to %s\n") % f1)
1573 s(_("the local file has been renamed to %s\n") % f1)
1533 else:
1574 else:
1534 s(_("the remote file has been renamed to %s\n") % f1)
1575 s(_("the remote file has been renamed to %s\n") % f1)
1535 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1576 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1536 ms.addpath(f, f1, fo)
1577 ms.addpath(f, f1, fo)
1537 z += 1
1578 z += 1
1538 progress(_updating, z, item=f, total=numupdates, unit=_files)
1579 progress(_updating, z, item=f, total=numupdates, unit=_files)
1539
1580
1540 # When merging in-memory, we can't support worker processes, so set the
1581 # When merging in-memory, we can't support worker processes, so set the
1541 # per-item cost at 0 in that case.
1582 # per-item cost at 0 in that case.
1542 cost = 0 if wctx.isinmemory() else 0.001
1583 cost = 0 if wctx.isinmemory() else 0.001
1543
1584
1544 # remove in parallel (must come before resolving path conflicts and getting)
1585 # remove in parallel (must come before resolving path conflicts and getting)
1545 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1586 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1546 actions['r'])
1587 actions[ACTION_REMOVE])
1547 for i, item in prog:
1588 for i, item in prog:
1548 z += i
1589 z += i
1549 progress(_updating, z, item=item, total=numupdates, unit=_files)
1590 progress(_updating, z, item=item, total=numupdates, unit=_files)
1550 removed = len(actions['r'])
1591 removed = len(actions[ACTION_REMOVE])
1551
1592
1552 # resolve path conflicts (must come before getting)
1593 # resolve path conflicts (must come before getting)
1553 for f, args, msg in actions['pr']:
1594 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1554 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1595 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1555 f0, = args
1596 f0, = args
1556 if wctx[f0].lexists():
1597 if wctx[f0].lexists():
1557 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1598 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1558 wctx[f].audit()
1599 wctx[f].audit()
1559 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1600 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1560 wctx[f0].remove()
1601 wctx[f0].remove()
1561 z += 1
1602 z += 1
1562 progress(_updating, z, item=f, total=numupdates, unit=_files)
1603 progress(_updating, z, item=f, total=numupdates, unit=_files)
1563
1604
1564 # get in parallel
1605 # get in parallel
1565 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1606 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1566 actions['g'])
1607 actions[ACTION_GET])
1567 for i, item in prog:
1608 for i, item in prog:
1568 z += i
1609 z += i
1569 progress(_updating, z, item=item, total=numupdates, unit=_files)
1610 progress(_updating, z, item=item, total=numupdates, unit=_files)
1570 updated = len(actions['g'])
1611 updated = len(actions[ACTION_GET])
1571
1612
1572 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1613 if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
1573 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1614 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1574
1615
1575 # forget (manifest only, just log it) (must come first)
1616 # forget (manifest only, just log it) (must come first)
1576 for f, args, msg in actions['f']:
1617 for f, args, msg in actions[ACTION_FORGET]:
1577 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1618 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1578 z += 1
1619 z += 1
1579 progress(_updating, z, item=f, total=numupdates, unit=_files)
1620 progress(_updating, z, item=f, total=numupdates, unit=_files)
1580
1621
1581 # re-add (manifest only, just log it)
1622 # re-add (manifest only, just log it)
1582 for f, args, msg in actions['a']:
1623 for f, args, msg in actions[ACTION_ADD]:
1583 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1624 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1584 z += 1
1625 z += 1
1585 progress(_updating, z, item=f, total=numupdates, unit=_files)
1626 progress(_updating, z, item=f, total=numupdates, unit=_files)
1586
1627
1587 # re-add/mark as modified (manifest only, just log it)
1628 # re-add/mark as modified (manifest only, just log it)
1588 for f, args, msg in actions['am']:
1629 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1589 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1630 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1590 z += 1
1631 z += 1
1591 progress(_updating, z, item=f, total=numupdates, unit=_files)
1632 progress(_updating, z, item=f, total=numupdates, unit=_files)
1592
1633
1593 # keep (noop, just log it)
1634 # keep (noop, just log it)
1594 for f, args, msg in actions['k']:
1635 for f, args, msg in actions[ACTION_KEEP]:
1595 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1636 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1596 # no progress
1637 # no progress
1597
1638
1598 # directory rename, move local
1639 # directory rename, move local
1599 for f, args, msg in actions['dm']:
1640 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1600 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1641 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1601 z += 1
1642 z += 1
1602 progress(_updating, z, item=f, total=numupdates, unit=_files)
1643 progress(_updating, z, item=f, total=numupdates, unit=_files)
1603 f0, flags = args
1644 f0, flags = args
1604 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1645 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1605 wctx[f].audit()
1646 wctx[f].audit()
1606 wctx[f].write(wctx.filectx(f0).data(), flags)
1647 wctx[f].write(wctx.filectx(f0).data(), flags)
1607 wctx[f0].remove()
1648 wctx[f0].remove()
1608 updated += 1
1649 updated += 1
1609
1650
1610 # local directory rename, get
1651 # local directory rename, get
1611 for f, args, msg in actions['dg']:
1652 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1612 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1653 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1613 z += 1
1654 z += 1
1614 progress(_updating, z, item=f, total=numupdates, unit=_files)
1655 progress(_updating, z, item=f, total=numupdates, unit=_files)
1615 f0, flags = args
1656 f0, flags = args
1616 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1657 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1617 wctx[f].write(mctx.filectx(f0).data(), flags)
1658 wctx[f].write(mctx.filectx(f0).data(), flags)
1618 updated += 1
1659 updated += 1
1619
1660
1620 # exec
1661 # exec
1621 for f, args, msg in actions['e']:
1662 for f, args, msg in actions[ACTION_EXEC]:
1622 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1663 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1623 z += 1
1664 z += 1
1624 progress(_updating, z, item=f, total=numupdates, unit=_files)
1665 progress(_updating, z, item=f, total=numupdates, unit=_files)
1625 flags, = args
1666 flags, = args
1626 wctx[f].audit()
1667 wctx[f].audit()
1627 wctx[f].setflags('l' in flags, 'x' in flags)
1668 wctx[f].setflags('l' in flags, 'x' in flags)
1628 updated += 1
1669 updated += 1
1629
1670
1630 # the ordering is important here -- ms.mergedriver will raise if the merge
1671 # the ordering is important here -- ms.mergedriver will raise if the merge
1631 # driver has changed, and we want to be able to bypass it when overwrite is
1672 # driver has changed, and we want to be able to bypass it when overwrite is
1632 # True
1673 # True
1633 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1674 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1634
1675
1635 if usemergedriver:
1676 if usemergedriver:
1636 if wctx.isinmemory():
1677 if wctx.isinmemory():
1637 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1678 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1638 "support mergedriver")
1679 "support mergedriver")
1639 ms.commit()
1680 ms.commit()
1640 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1681 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1641 # the driver might leave some files unresolved
1682 # the driver might leave some files unresolved
1642 unresolvedf = set(ms.unresolved())
1683 unresolvedf = set(ms.unresolved())
1643 if not proceed:
1684 if not proceed:
1644 # XXX setting unresolved to at least 1 is a hack to make sure we
1685 # XXX setting unresolved to at least 1 is a hack to make sure we
1645 # error out
1686 # error out
1646 return updateresult(updated, merged, removed,
1687 return updateresult(updated, merged, removed,
1647 max(len(unresolvedf), 1))
1688 max(len(unresolvedf), 1))
1648 newactions = []
1689 newactions = []
1649 for f, args, msg in mergeactions:
1690 for f, args, msg in mergeactions:
1650 if f in unresolvedf:
1691 if f in unresolvedf:
1651 newactions.append((f, args, msg))
1692 newactions.append((f, args, msg))
1652 mergeactions = newactions
1693 mergeactions = newactions
1653
1694
1654 try:
1695 try:
1655 # premerge
1696 # premerge
1656 tocomplete = []
1697 tocomplete = []
1657 for f, args, msg in mergeactions:
1698 for f, args, msg in mergeactions:
1658 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1699 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1659 z += 1
1700 z += 1
1660 progress(_updating, z, item=f, total=numupdates, unit=_files)
1701 progress(_updating, z, item=f, total=numupdates, unit=_files)
1661 if f == '.hgsubstate': # subrepo states need updating
1702 if f == '.hgsubstate': # subrepo states need updating
1662 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1703 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1663 overwrite, labels)
1704 overwrite, labels)
1664 continue
1705 continue
1665 wctx[f].audit()
1706 wctx[f].audit()
1666 complete, r = ms.preresolve(f, wctx)
1707 complete, r = ms.preresolve(f, wctx)
1667 if not complete:
1708 if not complete:
1668 numupdates += 1
1709 numupdates += 1
1669 tocomplete.append((f, args, msg))
1710 tocomplete.append((f, args, msg))
1670
1711
1671 # merge
1712 # merge
1672 for f, args, msg in tocomplete:
1713 for f, args, msg in tocomplete:
1673 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1714 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1674 z += 1
1715 z += 1
1675 progress(_updating, z, item=f, total=numupdates, unit=_files)
1716 progress(_updating, z, item=f, total=numupdates, unit=_files)
1676 ms.resolve(f, wctx)
1717 ms.resolve(f, wctx)
1677
1718
1678 finally:
1719 finally:
1679 ms.commit()
1720 ms.commit()
1680
1721
1681 unresolved = ms.unresolvedcount()
1722 unresolved = ms.unresolvedcount()
1682
1723
1683 if (usemergedriver and not unresolved
1724 if (usemergedriver and not unresolved
1684 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
1725 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
1685 if not driverconclude(repo, ms, wctx, labels=labels):
1726 if not driverconclude(repo, ms, wctx, labels=labels):
1686 # XXX setting unresolved to at least 1 is a hack to make sure we
1727 # XXX setting unresolved to at least 1 is a hack to make sure we
1687 # error out
1728 # error out
1688 unresolved = max(unresolved, 1)
1729 unresolved = max(unresolved, 1)
1689
1730
1690 ms.commit()
1731 ms.commit()
1691
1732
1692 msupdated, msmerged, msremoved = ms.counts()
1733 msupdated, msmerged, msremoved = ms.counts()
1693 updated += msupdated
1734 updated += msupdated
1694 merged += msmerged
1735 merged += msmerged
1695 removed += msremoved
1736 removed += msremoved
1696
1737
1697 extraactions = ms.actions()
1738 extraactions = ms.actions()
1698 if extraactions:
1739 if extraactions:
1699 mfiles = set(a[0] for a in actions['m'])
1740 mfiles = set(a[0] for a in actions[ACTION_MERGE])
1700 for k, acts in extraactions.iteritems():
1741 for k, acts in extraactions.iteritems():
1701 actions[k].extend(acts)
1742 actions[k].extend(acts)
1702 # Remove these files from actions['m'] as well. This is important
1743 # Remove these files from actions[ACTION_MERGE] as well. This is
1703 # because in recordupdates, files in actions['m'] are processed
1744 # important because in recordupdates, files in actions[ACTION_MERGE]
1704 # after files in other actions, and the merge driver might add
1745 # are processed after files in other actions, and the merge driver
1705 # files to those actions via extraactions above. This can lead to a
1746 # might add files to those actions via extraactions above. This can
1706 # file being recorded twice, with poor results. This is especially
1747 # lead to a file being recorded twice, with poor results. This is
1707 # problematic for actions['r'] (currently only possible with the
1748 # especially problematic for actions[ACTION_REMOVE] (currently only
1708 # merge driver in the initial merge process; interrupted merges
1749 # possible with the merge driver in the initial merge process;
1709 # don't go through this flow).
1750 # interrupted merges don't go through this flow).
1710 #
1751 #
1711 # The real fix here is to have indexes by both file and action so
1752 # The real fix here is to have indexes by both file and action so
1712 # that when the action for a file is changed it is automatically
1753 # that when the action for a file is changed it is automatically
1713 # reflected in the other action lists. But that involves a more
1754 # reflected in the other action lists. But that involves a more
1714 # complex data structure, so this will do for now.
1755 # complex data structure, so this will do for now.
1715 #
1756 #
1716 # We don't need to do the same operation for 'dc' and 'cd' because
1757 # We don't need to do the same operation for 'dc' and 'cd' because
1717 # those lists aren't consulted again.
1758 # those lists aren't consulted again.
1718 mfiles.difference_update(a[0] for a in acts)
1759 mfiles.difference_update(a[0] for a in acts)
1719
1760
1720 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1761 actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE]
1762 if a[0] in mfiles]
1721
1763
1722 progress(_updating, None, total=numupdates, unit=_files)
1764 progress(_updating, None, total=numupdates, unit=_files)
1723 return updateresult(updated, merged, removed, unresolved)
1765 return updateresult(updated, merged, removed, unresolved)
1724
1766
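# Illustrative sketch (not part of this change): the comment in applyupdates()
# above suggests that the "real fix" for keeping extraactions consistent would
# be a structure indexed by both file and action, so that changing a file's
# action updates every view at once. A minimal, hypothetical version of that
# idea could look like this; the class name and API are made up here.
class _actiontable(object):
    """Toy action table indexed both by file and by action (sketch only)."""

    def __init__(self):
        self._byfile = {}    # file -> (action, args, msg)
        self._byaction = {}  # action -> {file: (args, msg)}

    def set(self, f, action, args, msg):
        old = self._byfile.get(f)
        if old is not None:
            # drop the stale entry from the per-action index
            self._byaction[old[0]].pop(f, None)
        self._byfile[f] = (action, args, msg)
        self._byaction.setdefault(action, {})[f] = (args, msg)

    def byaction(self, action):
        # return entries in the (f, args, msg) shape applyupdates() iterates
        return [(f, args, msg)
                for f, (args, msg)
                in sorted(self._byaction.get(action, {}).items())]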
1725 def recordupdates(repo, actions, branchmerge):
1767 def recordupdates(repo, actions, branchmerge):
1726 "record merge actions to the dirstate"
1768 "record merge actions to the dirstate"
1727 # remove (must come first)
1769 # remove (must come first)
1728 for f, args, msg in actions.get('r', []):
1770 for f, args, msg in actions.get(ACTION_REMOVE, []):
1729 if branchmerge:
1771 if branchmerge:
1730 repo.dirstate.remove(f)
1772 repo.dirstate.remove(f)
1731 else:
1773 else:
1732 repo.dirstate.drop(f)
1774 repo.dirstate.drop(f)
1733
1775
1734 # forget (must come first)
1776 # forget (must come first)
1735 for f, args, msg in actions.get('f', []):
1777 for f, args, msg in actions.get(ACTION_FORGET, []):
1736 repo.dirstate.drop(f)
1778 repo.dirstate.drop(f)
1737
1779
1738 # resolve path conflicts
1780 # resolve path conflicts
1739 for f, args, msg in actions.get('pr', []):
1781 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
1740 f0, = args
1782 f0, = args
1741 origf0 = repo.dirstate.copied(f0) or f0
1783 origf0 = repo.dirstate.copied(f0) or f0
1742 repo.dirstate.add(f)
1784 repo.dirstate.add(f)
1743 repo.dirstate.copy(origf0, f)
1785 repo.dirstate.copy(origf0, f)
1744 if f0 == origf0:
1786 if f0 == origf0:
1745 repo.dirstate.remove(f0)
1787 repo.dirstate.remove(f0)
1746 else:
1788 else:
1747 repo.dirstate.drop(f0)
1789 repo.dirstate.drop(f0)
1748
1790
1749 # re-add
1791 # re-add
1750 for f, args, msg in actions.get('a', []):
1792 for f, args, msg in actions.get(ACTION_ADD, []):
1751 repo.dirstate.add(f)
1793 repo.dirstate.add(f)
1752
1794
1753 # re-add/mark as modified
1795 # re-add/mark as modified
1754 for f, args, msg in actions.get('am', []):
1796 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
1755 if branchmerge:
1797 if branchmerge:
1756 repo.dirstate.normallookup(f)
1798 repo.dirstate.normallookup(f)
1757 else:
1799 else:
1758 repo.dirstate.add(f)
1800 repo.dirstate.add(f)
1759
1801
1760 # exec change
1802 # exec change
1761 for f, args, msg in actions.get('e', []):
1803 for f, args, msg in actions.get(ACTION_EXEC, []):
1762 repo.dirstate.normallookup(f)
1804 repo.dirstate.normallookup(f)
1763
1805
1764 # keep
1806 # keep
1765 for f, args, msg in actions.get('k', []):
1807 for f, args, msg in actions.get(ACTION_KEEP, []):
1766 pass
1808 pass
1767
1809
1768 # get
1810 # get
1769 for f, args, msg in actions.get('g', []):
1811 for f, args, msg in actions.get(ACTION_GET, []):
1770 if branchmerge:
1812 if branchmerge:
1771 repo.dirstate.otherparent(f)
1813 repo.dirstate.otherparent(f)
1772 else:
1814 else:
1773 repo.dirstate.normal(f)
1815 repo.dirstate.normal(f)
1774
1816
1775 # merge
1817 # merge
1776 for f, args, msg in actions.get('m', []):
1818 for f, args, msg in actions.get(ACTION_MERGE, []):
1777 f1, f2, fa, move, anc = args
1819 f1, f2, fa, move, anc = args
1778 if branchmerge:
1820 if branchmerge:
1779 # We've done a branch merge, mark this file as merged
1821 # We've done a branch merge, mark this file as merged
1780 # so that we properly record the merger later
1822 # so that we properly record the merger later
1781 repo.dirstate.merge(f)
1823 repo.dirstate.merge(f)
1782 if f1 != f2: # copy/rename
1824 if f1 != f2: # copy/rename
1783 if move:
1825 if move:
1784 repo.dirstate.remove(f1)
1826 repo.dirstate.remove(f1)
1785 if f1 != f:
1827 if f1 != f:
1786 repo.dirstate.copy(f1, f)
1828 repo.dirstate.copy(f1, f)
1787 else:
1829 else:
1788 repo.dirstate.copy(f2, f)
1830 repo.dirstate.copy(f2, f)
1789 else:
1831 else:
1790 # We've update-merged a locally modified file, so
1832 # We've update-merged a locally modified file, so
1791 # we set the dirstate to emulate a normal checkout
1833 # we set the dirstate to emulate a normal checkout
1792 # of that file some time in the past. Thus our
1834 # of that file some time in the past. Thus our
1793 # merge will appear as a normal local file
1835 # merge will appear as a normal local file
1794 # modification.
1836 # modification.
1795 if f2 == f: # file not locally copied/moved
1837 if f2 == f: # file not locally copied/moved
1796 repo.dirstate.normallookup(f)
1838 repo.dirstate.normallookup(f)
1797 if move:
1839 if move:
1798 repo.dirstate.drop(f1)
1840 repo.dirstate.drop(f1)
1799
1841
1800 # directory rename, move local
1842 # directory rename, move local
1801 for f, args, msg in actions.get('dm', []):
1843 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
1802 f0, flag = args
1844 f0, flag = args
1803 if branchmerge:
1845 if branchmerge:
1804 repo.dirstate.add(f)
1846 repo.dirstate.add(f)
1805 repo.dirstate.remove(f0)
1847 repo.dirstate.remove(f0)
1806 repo.dirstate.copy(f0, f)
1848 repo.dirstate.copy(f0, f)
1807 else:
1849 else:
1808 repo.dirstate.normal(f)
1850 repo.dirstate.normal(f)
1809 repo.dirstate.drop(f0)
1851 repo.dirstate.drop(f0)
1810
1852
1811 # directory rename, get
1853 # directory rename, get
1812 for f, args, msg in actions.get('dg', []):
1854 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
1813 f0, flag = args
1855 f0, flag = args
1814 if branchmerge:
1856 if branchmerge:
1815 repo.dirstate.add(f)
1857 repo.dirstate.add(f)
1816 repo.dirstate.copy(f0, f)
1858 repo.dirstate.copy(f0, f)
1817 else:
1859 else:
1818 repo.dirstate.normal(f)
1860 repo.dirstate.normal(f)
1819
1861
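# Illustrative sketch (not part of this change): recordupdates() consumes the
# dictionary-of-lists mapping built for applyupdates(), i.e.
# {action constant: [(filename, args, msg), ...]}, and is normally called
# while the dirstate parents are being switched, as update() does further
# down. The helper name below is hypothetical.
def _recordupdates_example(repo, actions, fp1, fp2, branchmerge):
    with repo.dirstate.parentchange():
        repo.setparents(fp1, fp2)
        recordupdates(repo, actions, branchmerge)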
1820 def update(repo, node, branchmerge, force, ancestor=None,
1862 def update(repo, node, branchmerge, force, ancestor=None,
1821 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1863 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1822 updatecheck=None, wc=None):
1864 updatecheck=None, wc=None):
1823 """
1865 """
1824 Perform a merge between the working directory and the given node
1866 Perform a merge between the working directory and the given node
1825
1867
1826 node = the node to update to
1868 node = the node to update to
1827 branchmerge = whether to merge between branches
1869 branchmerge = whether to merge between branches
1828 force = whether to force branch merging or file overwriting
1870 force = whether to force branch merging or file overwriting
1829 matcher = a matcher to filter file lists (dirstate not updated)
1871 matcher = a matcher to filter file lists (dirstate not updated)
1830 mergeancestor = whether it is merging with an ancestor. If true,
1872 mergeancestor = whether it is merging with an ancestor. If true,
1831 we should accept the incoming changes for any prompts that occur.
1873 we should accept the incoming changes for any prompts that occur.
1832 If false, merging with an ancestor (fast-forward) is only allowed
1874 If false, merging with an ancestor (fast-forward) is only allowed
1833 between different named branches. This flag is used by the rebase extension
1875 between different named branches. This flag is used by the rebase extension
1834 as a temporary fix and should be avoided in general.
1876 as a temporary fix and should be avoided in general.
1835 labels = labels to use for base, local and other
1877 labels = labels to use for base, local and other
1836 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1878 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1837 this is True, then 'force' should be True as well.
1879 this is True, then 'force' should be True as well.
1838
1880
1839 The table below shows all the behaviors of the update command given the
1881 The table below shows all the behaviors of the update command given the
1840 -c/--check and -C/--clean or no options, whether the working directory is
1882 -c/--check and -C/--clean or no options, whether the working directory is
1841 dirty, whether a revision is specified, and the relationship of the parent
1883 dirty, whether a revision is specified, and the relationship of the parent
1842 rev to the target rev (linear or not). Match from top first. The -n
1884 rev to the target rev (linear or not). Match from top first. The -n
1843 option doesn't exist on the command line, but represents the
1885 option doesn't exist on the command line, but represents the
1844 experimental.updatecheck=noconflict option.
1886 experimental.updatecheck=noconflict option.
1845
1887
1846 This logic is tested by test-update-branches.t.
1888 This logic is tested by test-update-branches.t.
1847
1889
1848 -c -C -n -m dirty rev linear | result
1890 -c -C -n -m dirty rev linear | result
1849 y y * * * * * | (1)
1891 y y * * * * * | (1)
1850 y * y * * * * | (1)
1892 y * y * * * * | (1)
1851 y * * y * * * | (1)
1893 y * * y * * * | (1)
1852 * y y * * * * | (1)
1894 * y y * * * * | (1)
1853 * y * y * * * | (1)
1895 * y * y * * * | (1)
1854 * * y y * * * | (1)
1896 * * y y * * * | (1)
1855 * * * * * n n | x
1897 * * * * * n n | x
1856 * * * * n * * | ok
1898 * * * * n * * | ok
1857 n n n n y * y | merge
1899 n n n n y * y | merge
1858 n n n n y y n | (2)
1900 n n n n y y n | (2)
1859 n n n y y * * | merge
1901 n n n y y * * | merge
1860 n n y n y * * | merge if no conflict
1902 n n y n y * * | merge if no conflict
1861 n y n n y * * | discard
1903 n y n n y * * | discard
1862 y n n n y * * | (3)
1904 y n n n y * * | (3)
1863
1905
1864 x = can't happen
1906 x = can't happen
1865 * = don't-care
1907 * = don't-care
1866 1 = incompatible options (checked in commands.py)
1908 1 = incompatible options (checked in commands.py)
1867 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1909 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1868 3 = abort: uncommitted changes (checked in commands.py)
1910 3 = abort: uncommitted changes (checked in commands.py)
1869
1911
1870 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1912 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1871 to repo[None] if None is passed.
1913 to repo[None] if None is passed.
1872
1914
1873 Return the same tuple as applyupdates().
1915 Return the same tuple as applyupdates().
1874 """
1916 """
1875 # Avoid cycle.
1917 # Avoid cycle.
1876 from . import sparse
1918 from . import sparse
1877
1919
1878 # This function used to find the default destination if node was None, but
1920 # This function used to find the default destination if node was None, but
1879 # that's now in destutil.py.
1921 # that's now in destutil.py.
1880 assert node is not None
1922 assert node is not None
1881 if not branchmerge and not force:
1923 if not branchmerge and not force:
1882 # TODO: remove the default once all callers that pass branchmerge=False
1924 # TODO: remove the default once all callers that pass branchmerge=False
1883 # and force=False pass a value for updatecheck. We may want to allow
1925 # and force=False pass a value for updatecheck. We may want to allow
1884 # updatecheck='abort' to better support some of these callers.
1926 # updatecheck='abort' to better support some of these callers.
1885 if updatecheck is None:
1927 if updatecheck is None:
1886 updatecheck = 'linear'
1928 updatecheck = 'linear'
1887 assert updatecheck in ('none', 'linear', 'noconflict')
1929 assert updatecheck in ('none', 'linear', 'noconflict')
1888 # If we're doing a partial update, we need to skip updating
1930 # If we're doing a partial update, we need to skip updating
1889 # the dirstate, so make a note of any partial-ness to the
1931 # the dirstate, so make a note of any partial-ness to the
1890 # update here.
1932 # update here.
1891 if matcher is None or matcher.always():
1933 if matcher is None or matcher.always():
1892 partial = False
1934 partial = False
1893 else:
1935 else:
1894 partial = True
1936 partial = True
1895 with repo.wlock():
1937 with repo.wlock():
1896 if wc is None:
1938 if wc is None:
1897 wc = repo[None]
1939 wc = repo[None]
1898 pl = wc.parents()
1940 pl = wc.parents()
1899 p1 = pl[0]
1941 p1 = pl[0]
1900 pas = [None]
1942 pas = [None]
1901 if ancestor is not None:
1943 if ancestor is not None:
1902 pas = [repo[ancestor]]
1944 pas = [repo[ancestor]]
1903
1945
1904 overwrite = force and not branchmerge
1946 overwrite = force and not branchmerge
1905
1947
1906 p2 = repo[node]
1948 p2 = repo[node]
1907 if pas[0] is None:
1949 if pas[0] is None:
1908 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1950 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1909 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1951 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1910 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1952 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1911 else:
1953 else:
1912 pas = [p1.ancestor(p2, warn=branchmerge)]
1954 pas = [p1.ancestor(p2, warn=branchmerge)]
1913
1955
1914 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1956 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1915
1957
1916 ### check phase
1958 ### check phase
1917 if not overwrite:
1959 if not overwrite:
1918 if len(pl) > 1:
1960 if len(pl) > 1:
1919 raise error.Abort(_("outstanding uncommitted merge"))
1961 raise error.Abort(_("outstanding uncommitted merge"))
1920 ms = mergestate.read(repo)
1962 ms = mergestate.read(repo)
1921 if list(ms.unresolved()):
1963 if list(ms.unresolved()):
1922 raise error.Abort(_("outstanding merge conflicts"))
1964 raise error.Abort(_("outstanding merge conflicts"))
1923 if branchmerge:
1965 if branchmerge:
1924 if pas == [p2]:
1966 if pas == [p2]:
1925 raise error.Abort(_("merging with a working directory ancestor"
1967 raise error.Abort(_("merging with a working directory ancestor"
1926 " has no effect"))
1968 " has no effect"))
1927 elif pas == [p1]:
1969 elif pas == [p1]:
1928 if not mergeancestor and wc.branch() == p2.branch():
1970 if not mergeancestor and wc.branch() == p2.branch():
1929 raise error.Abort(_("nothing to merge"),
1971 raise error.Abort(_("nothing to merge"),
1930 hint=_("use 'hg update' "
1972 hint=_("use 'hg update' "
1931 "or check 'hg heads'"))
1973 "or check 'hg heads'"))
1932 if not force and (wc.files() or wc.deleted()):
1974 if not force and (wc.files() or wc.deleted()):
1933 raise error.Abort(_("uncommitted changes"),
1975 raise error.Abort(_("uncommitted changes"),
1934 hint=_("use 'hg status' to list changes"))
1976 hint=_("use 'hg status' to list changes"))
1935 if not wc.isinmemory():
1977 if not wc.isinmemory():
1936 for s in sorted(wc.substate):
1978 for s in sorted(wc.substate):
1937 wc.sub(s).bailifchanged()
1979 wc.sub(s).bailifchanged()
1938
1980
1939 elif not overwrite:
1981 elif not overwrite:
1940 if p1 == p2: # no-op update
1982 if p1 == p2: # no-op update
1941 # call the hooks and exit early
1983 # call the hooks and exit early
1942 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1984 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1943 repo.hook('update', parent1=xp2, parent2='', error=0)
1985 repo.hook('update', parent1=xp2, parent2='', error=0)
1944 return updateresult(0, 0, 0, 0)
1986 return updateresult(0, 0, 0, 0)
1945
1987
1946 if (updatecheck == 'linear' and
1988 if (updatecheck == 'linear' and
1947 pas not in ([p1], [p2])): # nonlinear
1989 pas not in ([p1], [p2])): # nonlinear
1948 dirty = wc.dirty(missing=True)
1990 dirty = wc.dirty(missing=True)
1949 if dirty:
1991 if dirty:
1950 # The branching is a bit strange, to ensure we make the minimal
1992 # The branching is a bit strange, to ensure we make the minimal
1951 # number of calls to obsutil.foreground.
1993 # number of calls to obsutil.foreground.
1952 foreground = obsutil.foreground(repo, [p1.node()])
1994 foreground = obsutil.foreground(repo, [p1.node()])
1953 # note: the <node> variable contains a random identifier
1995 # note: the <node> variable contains a random identifier
1954 if repo[node].node() in foreground:
1996 if repo[node].node() in foreground:
1955 pass # allow updating to successors
1997 pass # allow updating to successors
1956 else:
1998 else:
1957 msg = _("uncommitted changes")
1999 msg = _("uncommitted changes")
1958 hint = _("commit or update --clean to discard changes")
2000 hint = _("commit or update --clean to discard changes")
1959 raise error.UpdateAbort(msg, hint=hint)
2001 raise error.UpdateAbort(msg, hint=hint)
1960 else:
2002 else:
1961 # Allow jumping branches if clean and specific rev given
2003 # Allow jumping branches if clean and specific rev given
1962 pass
2004 pass
1963
2005
1964 if overwrite:
2006 if overwrite:
1965 pas = [wc]
2007 pas = [wc]
1966 elif not branchmerge:
2008 elif not branchmerge:
1967 pas = [p1]
2009 pas = [p1]
1968
2010
1969 # deprecated config: merge.followcopies
2011 # deprecated config: merge.followcopies
1970 followcopies = repo.ui.configbool('merge', 'followcopies')
2012 followcopies = repo.ui.configbool('merge', 'followcopies')
1971 if overwrite:
2013 if overwrite:
1972 followcopies = False
2014 followcopies = False
1973 elif not pas[0]:
2015 elif not pas[0]:
1974 followcopies = False
2016 followcopies = False
1975 if not branchmerge and not wc.dirty(missing=True):
2017 if not branchmerge and not wc.dirty(missing=True):
1976 followcopies = False
2018 followcopies = False
1977
2019
1978 ### calculate phase
2020 ### calculate phase
1979 actionbyfile, diverge, renamedelete = calculateupdates(
2021 actionbyfile, diverge, renamedelete = calculateupdates(
1980 repo, wc, p2, pas, branchmerge, force, mergeancestor,
2022 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1981 followcopies, matcher=matcher, mergeforce=mergeforce)
2023 followcopies, matcher=matcher, mergeforce=mergeforce)
1982
2024
1983 if updatecheck == 'noconflict':
2025 if updatecheck == 'noconflict':
1984 for f, (m, args, msg) in actionbyfile.iteritems():
2026 for f, (m, args, msg) in actionbyfile.iteritems():
1985 if m not in ('g', 'k', 'e', 'r', 'pr'):
2027 if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC,
2028 ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE):
1986 msg = _("conflicting changes")
2029 msg = _("conflicting changes")
1987 hint = _("commit or update --clean to discard changes")
2030 hint = _("commit or update --clean to discard changes")
1988 raise error.Abort(msg, hint=hint)
2031 raise error.Abort(msg, hint=hint)
1989
2032
1990 # Prompt and create actions. Most of this is in the resolve phase
2033 # Prompt and create actions. Most of this is in the resolve phase
1991 # already, but we can't handle .hgsubstate in filemerge or
2034 # already, but we can't handle .hgsubstate in filemerge or
1992 # subrepoutil.submerge yet so we have to keep prompting for it.
2035 # subrepoutil.submerge yet so we have to keep prompting for it.
1993 if '.hgsubstate' in actionbyfile:
2036 if '.hgsubstate' in actionbyfile:
1994 f = '.hgsubstate'
2037 f = '.hgsubstate'
1995 m, args, msg = actionbyfile[f]
2038 m, args, msg = actionbyfile[f]
1996 prompts = filemerge.partextras(labels)
2039 prompts = filemerge.partextras(labels)
1997 prompts['f'] = f
2040 prompts['f'] = f
1998 if m == 'cd':
2041 if m == ACTION_CHANGED_DELETED:
1999 if repo.ui.promptchoice(
2042 if repo.ui.promptchoice(
2000 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
2043 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
2001 "use (c)hanged version or (d)elete?"
2044 "use (c)hanged version or (d)elete?"
2002 "$$ &Changed $$ &Delete") % prompts, 0):
2045 "$$ &Changed $$ &Delete") % prompts, 0):
2003 actionbyfile[f] = ('r', None, "prompt delete")
2046 actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete')
2004 elif f in p1:
2047 elif f in p1:
2005 actionbyfile[f] = ('am', None, "prompt keep")
2048 actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep')
2006 else:
2049 else:
2007 actionbyfile[f] = ('a', None, "prompt keep")
2050 actionbyfile[f] = (ACTION_ADD, None, 'prompt keep')
2008 elif m == 'dc':
2051 elif m == ACTION_DELETED_CHANGED:
2009 f1, f2, fa, move, anc = args
2052 f1, f2, fa, move, anc = args
2010 flags = p2[f2].flags()
2053 flags = p2[f2].flags()
2011 if repo.ui.promptchoice(
2054 if repo.ui.promptchoice(
2012 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
2055 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
2013 "use (c)hanged version or leave (d)eleted?"
2056 "use (c)hanged version or leave (d)eleted?"
2014 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
2057 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
2015 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
2058 actionbyfile[f] = (ACTION_GET, (flags, False),
2059 'prompt recreating')
2016 else:
2060 else:
2017 del actionbyfile[f]
2061 del actionbyfile[f]
2018
2062
2019 # Convert to dictionary-of-lists format
2063 # Convert to dictionary-of-lists format
2020 actions = dict((m, [])
2064 actions = dict((m, [])
2021 for m in 'a am f g cd dc r dm dg m e k p pr'.split())
2065 for m in (
2066 ACTION_ADD,
2067 ACTION_ADD_MODIFIED,
2068 ACTION_FORGET,
2069 ACTION_GET,
2070 ACTION_CHANGED_DELETED,
2071 ACTION_DELETED_CHANGED,
2072 ACTION_REMOVE,
2073 ACTION_DIR_RENAME_MOVE_LOCAL,
2074 ACTION_LOCAL_DIR_RENAME_GET,
2075 ACTION_MERGE,
2076 ACTION_EXEC,
2077 ACTION_KEEP,
2078 ACTION_PATH_CONFLICT,
2079 ACTION_PATH_CONFLICT_RESOLVE))
2022 for f, (m, args, msg) in actionbyfile.iteritems():
2080 for f, (m, args, msg) in actionbyfile.iteritems():
2023 if m not in actions:
2081 if m not in actions:
2024 actions[m] = []
2082 actions[m] = []
2025 actions[m].append((f, args, msg))
2083 actions[m].append((f, args, msg))
2026
2084
2027 if not util.fscasesensitive(repo.path):
2085 if not util.fscasesensitive(repo.path):
2028 # check collision between files only in p2 for clean update
2086 # check collision between files only in p2 for clean update
2029 if (not branchmerge and
2087 if (not branchmerge and
2030 (force or not wc.dirty(missing=True, branch=False))):
2088 (force or not wc.dirty(missing=True, branch=False))):
2031 _checkcollision(repo, p2.manifest(), None)
2089 _checkcollision(repo, p2.manifest(), None)
2032 else:
2090 else:
2033 _checkcollision(repo, wc.manifest(), actions)
2091 _checkcollision(repo, wc.manifest(), actions)
2034
2092
2035 # divergent renames
2093 # divergent renames
2036 for f, fl in sorted(diverge.iteritems()):
2094 for f, fl in sorted(diverge.iteritems()):
2037 repo.ui.warn(_("note: possible conflict - %s was renamed "
2095 repo.ui.warn(_("note: possible conflict - %s was renamed "
2038 "multiple times to:\n") % f)
2096 "multiple times to:\n") % f)
2039 for nf in fl:
2097 for nf in fl:
2040 repo.ui.warn(" %s\n" % nf)
2098 repo.ui.warn(" %s\n" % nf)
2041
2099
2042 # rename and delete
2100 # rename and delete
2043 for f, fl in sorted(renamedelete.iteritems()):
2101 for f, fl in sorted(renamedelete.iteritems()):
2044 repo.ui.warn(_("note: possible conflict - %s was deleted "
2102 repo.ui.warn(_("note: possible conflict - %s was deleted "
2045 "and renamed to:\n") % f)
2103 "and renamed to:\n") % f)
2046 for nf in fl:
2104 for nf in fl:
2047 repo.ui.warn(" %s\n" % nf)
2105 repo.ui.warn(" %s\n" % nf)
2048
2106
2049 ### apply phase
2107 ### apply phase
2050 if not branchmerge: # just jump to the new rev
2108 if not branchmerge: # just jump to the new rev
2051 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2109 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2052 if not partial and not wc.isinmemory():
2110 if not partial and not wc.isinmemory():
2053 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2111 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2054 # note that we're in the middle of an update
2112 # note that we're in the middle of an update
2055 repo.vfs.write('updatestate', p2.hex())
2113 repo.vfs.write('updatestate', p2.hex())
2056
2114
2057 # Advertise fsmonitor when its presence could be useful.
2115 # Advertise fsmonitor when its presence could be useful.
2058 #
2116 #
2059 # We only advertise when performing an update from an empty working
2117 # We only advertise when performing an update from an empty working
2060 # directory. This typically only occurs during initial clone.
2118 # directory. This typically only occurs during initial clone.
2061 #
2119 #
2062 # We give users a mechanism to disable the warning in case it is
2120 # We give users a mechanism to disable the warning in case it is
2063 # annoying.
2121 # annoying.
2064 #
2122 #
2065 # We only allow on Linux and MacOS because that's where fsmonitor is
2123 # We only allow on Linux and MacOS because that's where fsmonitor is
2066 # considered stable.
2124 # considered stable.
2067 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2125 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2068 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2126 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2069 'warn_update_file_count')
2127 'warn_update_file_count')
2070 try:
2128 try:
2071 # avoid cycle: extensions -> cmdutil -> merge
2129 # avoid cycle: extensions -> cmdutil -> merge
2072 from . import extensions
2130 from . import extensions
2073 extensions.find('fsmonitor')
2131 extensions.find('fsmonitor')
2074 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2132 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2075 # We intentionally don't look at whether fsmonitor has disabled
2133 # We intentionally don't look at whether fsmonitor has disabled
2076 # itself because a) fsmonitor may have already printed a warning
2134 # itself because a) fsmonitor may have already printed a warning
2077 # b) we only care about the config state here.
2135 # b) we only care about the config state here.
2078 except KeyError:
2136 except KeyError:
2079 fsmonitorenabled = False
2137 fsmonitorenabled = False
2080
2138
2081 if (fsmonitorwarning
2139 if (fsmonitorwarning
2082 and not fsmonitorenabled
2140 and not fsmonitorenabled
2083 and p1.node() == nullid
2141 and p1.node() == nullid
2084 and len(actions['g']) >= fsmonitorthreshold
2142 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2085 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2143 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2086 repo.ui.warn(
2144 repo.ui.warn(
2087 _('(warning: large working directory being used without '
2145 _('(warning: large working directory being used without '
2088 'fsmonitor enabled; enable fsmonitor to improve performance; '
2146 'fsmonitor enabled; enable fsmonitor to improve performance; '
2089 'see "hg help -e fsmonitor")\n'))
2147 'see "hg help -e fsmonitor")\n'))
2090
2148
2091 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2149 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2092
2150
2093 if not partial and not wc.isinmemory():
2151 if not partial and not wc.isinmemory():
2094 with repo.dirstate.parentchange():
2152 with repo.dirstate.parentchange():
2095 repo.setparents(fp1, fp2)
2153 repo.setparents(fp1, fp2)
2096 recordupdates(repo, actions, branchmerge)
2154 recordupdates(repo, actions, branchmerge)
2097 # update completed, clear state
2155 # update completed, clear state
2098 util.unlink(repo.vfs.join('updatestate'))
2156 util.unlink(repo.vfs.join('updatestate'))
2099
2157
2100 if not branchmerge:
2158 if not branchmerge:
2101 repo.dirstate.setbranch(p2.branch())
2159 repo.dirstate.setbranch(p2.branch())
2102
2160
2103 # If we're updating to a location, clean up any stale temporary includes
2161 # If we're updating to a location, clean up any stale temporary includes
2104 # (ex: this happens during hg rebase --abort).
2162 # (ex: this happens during hg rebase --abort).
2105 if not branchmerge:
2163 if not branchmerge:
2106 sparse.prunetemporaryincludes(repo)
2164 sparse.prunetemporaryincludes(repo)
2107
2165
2108 if not partial:
2166 if not partial:
2109 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2167 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2110 return stats
2168 return stats
2111
2169
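# Illustrative sketch (not part of this change): a plain working-copy checkout
# through update(), asking for the 'noconflict' updatecheck described in the
# docstring above so a dirty working directory is never merged. The helper
# name is hypothetical; the return value is the same 4-tuple as
# applyupdates(): (updated, merged, removed, unresolved).
def _checkout_example(repo, node):
    return update(repo, node, branchmerge=False, force=False,
                  updatecheck='noconflict')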
2112 def graft(repo, ctx, pctx, labels, keepparent=False):
2170 def graft(repo, ctx, pctx, labels, keepparent=False):
2113 """Do a graft-like merge.
2171 """Do a graft-like merge.
2114
2172
2115 This is a merge where the merge ancestor is chosen such that one
2173 This is a merge where the merge ancestor is chosen such that one
2116 or more changesets are grafted onto the current changeset. In
2174 or more changesets are grafted onto the current changeset. In
2117 addition to the merge, this fixes up the dirstate to include only
2175 addition to the merge, this fixes up the dirstate to include only
2118 a single parent (if keepparent is False) and tries to duplicate any
2176 a single parent (if keepparent is False) and tries to duplicate any
2119 renames/copies appropriately.
2177 renames/copies appropriately.
2120
2178
2121 ctx - changeset to rebase
2179 ctx - changeset to rebase
2122 pctx - merge base, usually ctx.p1()
2180 pctx - merge base, usually ctx.p1()
2123 labels - merge labels eg ['local', 'graft']
2181 labels - merge labels eg ['local', 'graft']
2124 keepparent - keep second parent if any
2182 keepparent - keep second parent if any
2125
2183
2126 """
2184 """
2127 # If we're grafting a descendant onto an ancestor, be sure to pass
2185 # If we're grafting a descendant onto an ancestor, be sure to pass
2128 # mergeancestor=True to update. This does two things: 1) allows the merge if
2186 # mergeancestor=True to update. This does two things: 1) allows the merge if
2129 # the destination is the same as the parent of the ctx (so we can use graft
2187 # the destination is the same as the parent of the ctx (so we can use graft
2130 # to copy commits), and 2) informs update that the incoming changes are
2188 # to copy commits), and 2) informs update that the incoming changes are
2131 # newer than the destination so it doesn't prompt about "remote changed foo
2189 # newer than the destination so it doesn't prompt about "remote changed foo
2132 # which local deleted".
2190 # which local deleted".
2133 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2191 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2134
2192
2135 stats = update(repo, ctx.node(), True, True, pctx.node(),
2193 stats = update(repo, ctx.node(), True, True, pctx.node(),
2136 mergeancestor=mergeancestor, labels=labels)
2194 mergeancestor=mergeancestor, labels=labels)
2137
2195
2138 pother = nullid
2196 pother = nullid
2139 parents = ctx.parents()
2197 parents = ctx.parents()
2140 if keepparent and len(parents) == 2 and pctx in parents:
2198 if keepparent and len(parents) == 2 and pctx in parents:
2141 parents.remove(pctx)
2199 parents.remove(pctx)
2142 pother = parents[0].node()
2200 pother = parents[0].node()
2143
2201
2144 with repo.dirstate.parentchange():
2202 with repo.dirstate.parentchange():
2145 repo.setparents(repo['.'].node(), pother)
2203 repo.setparents(repo['.'].node(), pother)
2146 repo.dirstate.write(repo.currenttransaction())
2204 repo.dirstate.write(repo.currenttransaction())
2147 # fix up dirstate for copies and renames
2205 # fix up dirstate for copies and renames
2148 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2206 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2149 return stats
2207 return stats
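# Illustrative sketch (not part of this change): grafting a single changeset
# onto the working directory, using ctx.p1() as the merge base and the label
# pair suggested in the graft() docstring. The helper name is hypothetical.
def _graft_example(repo, rev):
    ctx = repo[rev]
    return graft(repo, ctx, ctx.p1(), ['local', 'graft'])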