mergestate: add accessors for local and other nodeid, not just contexts...
Martin von Zweigbergk
r44696:b1069b36 default
@@ -1,2734 +1,2742 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import shutil
11 import shutil
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .pycompat import delattr
25 from .pycompat import delattr
26 from .thirdparty import attr
26 from .thirdparty import attr
27 from . import (
27 from . import (
28 copies,
28 copies,
29 encoding,
29 encoding,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pathutil,
34 pathutil,
35 pycompat,
35 pycompat,
36 scmutil,
36 scmutil,
37 subrepoutil,
37 subrepoutil,
38 util,
38 util,
39 worker,
39 worker,
40 )
40 )
41 from .utils import hashutil
41 from .utils import hashutil
42
42
43 _pack = struct.pack
43 _pack = struct.pack
44 _unpack = struct.unpack
44 _unpack = struct.unpack
45
45
46
46
47 def _droponode(data):
47 def _droponode(data):
48 # used for compatibility for v1
48 # used for compatibility for v1
49 bits = data.split(b'\0')
49 bits = data.split(b'\0')
50 bits = bits[:-2] + bits[-1:]
50 bits = bits[:-2] + bits[-1:]
51 return b'\0'.join(bits)
51 return b'\0'.join(bits)
52
52
53
53
54 # Merge state record types. See ``mergestate`` docs for more.
54 # Merge state record types. See ``mergestate`` docs for more.
55 RECORD_LOCAL = b'L'
55 RECORD_LOCAL = b'L'
56 RECORD_OTHER = b'O'
56 RECORD_OTHER = b'O'
57 RECORD_MERGED = b'F'
57 RECORD_MERGED = b'F'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
60 RECORD_PATH_CONFLICT = b'P'
60 RECORD_PATH_CONFLICT = b'P'
61 RECORD_MERGE_DRIVER_STATE = b'm'
61 RECORD_MERGE_DRIVER_STATE = b'm'
62 RECORD_FILE_VALUES = b'f'
62 RECORD_FILE_VALUES = b'f'
63 RECORD_LABELS = b'l'
63 RECORD_LABELS = b'l'
64 RECORD_OVERRIDE = b't'
64 RECORD_OVERRIDE = b't'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
67
67
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
69 MERGE_DRIVER_STATE_MARKED = b'm'
69 MERGE_DRIVER_STATE_MARKED = b'm'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
71
71
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
77
77
78 ACTION_FORGET = b'f'
78 ACTION_FORGET = b'f'
79 ACTION_REMOVE = b'r'
79 ACTION_REMOVE = b'r'
80 ACTION_ADD = b'a'
80 ACTION_ADD = b'a'
81 ACTION_GET = b'g'
81 ACTION_GET = b'g'
82 ACTION_PATH_CONFLICT = b'p'
82 ACTION_PATH_CONFLICT = b'p'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
84 ACTION_ADD_MODIFIED = b'am'
84 ACTION_ADD_MODIFIED = b'am'
85 ACTION_CREATED = b'c'
85 ACTION_CREATED = b'c'
86 ACTION_DELETED_CHANGED = b'dc'
86 ACTION_DELETED_CHANGED = b'dc'
87 ACTION_CHANGED_DELETED = b'cd'
87 ACTION_CHANGED_DELETED = b'cd'
88 ACTION_MERGE = b'm'
88 ACTION_MERGE = b'm'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
91 ACTION_KEEP = b'k'
91 ACTION_KEEP = b'k'
92 ACTION_EXEC = b'e'
92 ACTION_EXEC = b'e'
93 ACTION_CREATED_MERGE = b'cm'
93 ACTION_CREATED_MERGE = b'cm'
94
94
95
95
96 class mergestate(object):
96 class mergestate(object):
97 '''track 3-way merge state of individual files
97 '''track 3-way merge state of individual files
98
98
99 The merge state is stored on disk when needed. Two files are used: one with
99 The merge state is stored on disk when needed. Two files are used: one with
100 an old format (version 1), and one with a new format (version 2). Version 2
100 an old format (version 1), and one with a new format (version 2). Version 2
101 stores a superset of the data in version 1, including new kinds of records
101 stores a superset of the data in version 1, including new kinds of records
102 in the future. For more about the new format, see the documentation for
102 in the future. For more about the new format, see the documentation for
103 `_readrecordsv2`.
103 `_readrecordsv2`.
104
104
105 Each record can contain arbitrary content, and has an associated type. This
105 Each record can contain arbitrary content, and has an associated type. This
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 versions of Mercurial that don't support it should abort. If `type` is
107 versions of Mercurial that don't support it should abort. If `type` is
108 lowercase, the record can be safely ignored.
108 lowercase, the record can be safely ignored.
109
109
110 Currently known records:
110 Currently known records:
111
111
112 L: the node of the "local" part of the merge (hexified version)
112 L: the node of the "local" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
114 F: a file to be merged entry
114 F: a file to be merged entry
115 C: a change/delete or delete/change conflict
115 C: a change/delete or delete/change conflict
116 D: a file that the external merge driver will merge internally
116 D: a file that the external merge driver will merge internally
117 (experimental)
117 (experimental)
118 P: a path conflict (file vs directory)
118 P: a path conflict (file vs directory)
119 m: the external merge driver defined for this merge plus its run state
119 m: the external merge driver defined for this merge plus its run state
120 (experimental)
120 (experimental)
121 f: a (filename, dictionary) tuple of optional values for a given file
121 f: a (filename, dictionary) tuple of optional values for a given file
122 X: unsupported mandatory record type (used in tests)
122 X: unsupported mandatory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
124 l: the labels for the parts of the merge.
124 l: the labels for the parts of the merge.
125
125
126 Merge driver run states (experimental):
126 Merge driver run states (experimental):
127 u: driver-resolved files unmarked -- needs to be run next time we're about
127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 to resolve or commit
128 to resolve or commit
129 m: driver-resolved files marked -- only needs to be run before commit
129 m: driver-resolved files marked -- only needs to be run before commit
130 s: success/skipped -- does not need to be run any more
130 s: success/skipped -- does not need to be run any more
131
131
132 Merge record states (stored in self._state, indexed by filename):
132 Merge record states (stored in self._state, indexed by filename):
133 u: unresolved conflict
133 u: unresolved conflict
134 r: resolved conflict
134 r: resolved conflict
135 pu: unresolved path conflict (file conflicts with directory)
135 pu: unresolved path conflict (file conflicts with directory)
136 pr: resolved path conflict
136 pr: resolved path conflict
137 d: driver-resolved conflict
137 d: driver-resolved conflict
138
138
139 The resolve command transitions between 'u' and 'r' for conflicts and
139 The resolve command transitions between 'u' and 'r' for conflicts and
140 'pu' and 'pr' for path conflicts.
140 'pu' and 'pr' for path conflicts.
141 '''
141 '''
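As an illustration of the record types and per-file states documented in the docstring above, here is a minimal, hypothetical sketch of how a caller might inspect them; it assumes an existing `repo` (a Mercurial localrepository) and uses only names defined in this module (`mergestate.read()`, iteration/indexing, and the `MERGE_RECORD_*` constants).

```python
from mercurial import merge as mergemod

# Hypothetical inspection of the on-disk merge state described above;
# 'repo' is assumed to be an existing localrepository object.
ms = mergemod.mergestate.read(repo)
for f in ms:                     # filenames, sorted (see __iter__ below)
    state = ms[f]                # first field of the state entry: b'u', b'r', b'pu', ...
    if state == mergemod.MERGE_RECORD_UNRESOLVED:
        repo.ui.write(b'unresolved conflict: %s\n' % f)
```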
142
142
143 statepathv1 = b'merge/state'
143 statepathv1 = b'merge/state'
144 statepathv2 = b'merge/state2'
144 statepathv2 = b'merge/state2'
145
145
146 @staticmethod
146 @staticmethod
147 def clean(repo, node=None, other=None, labels=None):
147 def clean(repo, node=None, other=None, labels=None):
148 """Initialize a brand new merge state, removing any existing state on
148 """Initialize a brand new merge state, removing any existing state on
149 disk."""
149 disk."""
150 ms = mergestate(repo)
150 ms = mergestate(repo)
151 ms.reset(node, other, labels)
151 ms.reset(node, other, labels)
152 return ms
152 return ms
153
153
154 @staticmethod
154 @staticmethod
155 def read(repo):
155 def read(repo):
156 """Initialize the merge state, reading it from disk."""
156 """Initialize the merge state, reading it from disk."""
157 ms = mergestate(repo)
157 ms = mergestate(repo)
158 ms._read()
158 ms._read()
159 return ms
159 return ms
160
160
161 def __init__(self, repo):
161 def __init__(self, repo):
162 """Initialize the merge state.
162 """Initialize the merge state.
163
163
164 Do not use this directly! Instead call read() or clean()."""
164 Do not use this directly! Instead call read() or clean()."""
165 self._repo = repo
165 self._repo = repo
166 self._dirty = False
166 self._dirty = False
167 self._labels = None
167 self._labels = None
168
168
169 def reset(self, node=None, other=None, labels=None):
169 def reset(self, node=None, other=None, labels=None):
170 self._state = {}
170 self._state = {}
171 self._stateextras = {}
171 self._stateextras = {}
172 self._local = None
172 self._local = None
173 self._other = None
173 self._other = None
174 self._labels = labels
174 self._labels = labels
175 for var in ('localctx', 'otherctx'):
175 for var in ('localctx', 'otherctx'):
176 if var in vars(self):
176 if var in vars(self):
177 delattr(self, var)
177 delattr(self, var)
178 if node:
178 if node:
179 self._local = node
179 self._local = node
180 self._other = other
180 self._other = other
181 self._readmergedriver = None
181 self._readmergedriver = None
182 if self.mergedriver:
182 if self.mergedriver:
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 else:
184 else:
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 self._results = {}
187 self._results = {}
188 self._dirty = False
188 self._dirty = False
189
189
190 def _read(self):
190 def _read(self):
191 """Analyse each record content to restore a serialized state from disk
191 """Analyse each record content to restore a serialized state from disk
192
192
193 This function processes "record" entries produced by the de-serialization
193 This function processes "record" entries produced by the de-serialization
194 of the on-disk file.
194 of the on-disk file.
195 """
195 """
196 self._state = {}
196 self._state = {}
197 self._stateextras = {}
197 self._stateextras = {}
198 self._local = None
198 self._local = None
199 self._other = None
199 self._other = None
200 for var in ('localctx', 'otherctx'):
200 for var in ('localctx', 'otherctx'):
201 if var in vars(self):
201 if var in vars(self):
202 delattr(self, var)
202 delattr(self, var)
203 self._readmergedriver = None
203 self._readmergedriver = None
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
205 unsupported = set()
205 unsupported = set()
206 records = self._readrecords()
206 records = self._readrecords()
207 for rtype, record in records:
207 for rtype, record in records:
208 if rtype == RECORD_LOCAL:
208 if rtype == RECORD_LOCAL:
209 self._local = bin(record)
209 self._local = bin(record)
210 elif rtype == RECORD_OTHER:
210 elif rtype == RECORD_OTHER:
211 self._other = bin(record)
211 self._other = bin(record)
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
213 bits = record.split(b'\0', 1)
213 bits = record.split(b'\0', 1)
214 mdstate = bits[1]
214 mdstate = bits[1]
215 if len(mdstate) != 1 or mdstate not in (
215 if len(mdstate) != 1 or mdstate not in (
216 MERGE_DRIVER_STATE_UNMARKED,
216 MERGE_DRIVER_STATE_UNMARKED,
217 MERGE_DRIVER_STATE_MARKED,
217 MERGE_DRIVER_STATE_MARKED,
218 MERGE_DRIVER_STATE_SUCCESS,
218 MERGE_DRIVER_STATE_SUCCESS,
219 ):
219 ):
220 # the merge driver should be idempotent, so just rerun it
220 # the merge driver should be idempotent, so just rerun it
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
222
222
223 self._readmergedriver = bits[0]
223 self._readmergedriver = bits[0]
224 self._mdstate = mdstate
224 self._mdstate = mdstate
225 elif rtype in (
225 elif rtype in (
226 RECORD_MERGED,
226 RECORD_MERGED,
227 RECORD_CHANGEDELETE_CONFLICT,
227 RECORD_CHANGEDELETE_CONFLICT,
228 RECORD_PATH_CONFLICT,
228 RECORD_PATH_CONFLICT,
229 RECORD_MERGE_DRIVER_MERGE,
229 RECORD_MERGE_DRIVER_MERGE,
230 ):
230 ):
231 bits = record.split(b'\0')
231 bits = record.split(b'\0')
232 self._state[bits[0]] = bits[1:]
232 self._state[bits[0]] = bits[1:]
233 elif rtype == RECORD_FILE_VALUES:
233 elif rtype == RECORD_FILE_VALUES:
234 filename, rawextras = record.split(b'\0', 1)
234 filename, rawextras = record.split(b'\0', 1)
235 extraparts = rawextras.split(b'\0')
235 extraparts = rawextras.split(b'\0')
236 extras = {}
236 extras = {}
237 i = 0
237 i = 0
238 while i < len(extraparts):
238 while i < len(extraparts):
239 extras[extraparts[i]] = extraparts[i + 1]
239 extras[extraparts[i]] = extraparts[i + 1]
240 i += 2
240 i += 2
241
241
242 self._stateextras[filename] = extras
242 self._stateextras[filename] = extras
243 elif rtype == RECORD_LABELS:
243 elif rtype == RECORD_LABELS:
244 labels = record.split(b'\0', 2)
244 labels = record.split(b'\0', 2)
245 self._labels = [l for l in labels if len(l) > 0]
245 self._labels = [l for l in labels if len(l) > 0]
246 elif not rtype.islower():
246 elif not rtype.islower():
247 unsupported.add(rtype)
247 unsupported.add(rtype)
248 self._results = {}
248 self._results = {}
249 self._dirty = False
249 self._dirty = False
250
250
251 if unsupported:
251 if unsupported:
252 raise error.UnsupportedMergeRecords(unsupported)
252 raise error.UnsupportedMergeRecords(unsupported)
253
253
254 def _readrecords(self):
254 def _readrecords(self):
255 """Read merge state from disk and return a list of record (TYPE, data)
255 """Read merge state from disk and return a list of record (TYPE, data)
256
256
257 We read data from both v1 and v2 files and decide which one to use.
257 We read data from both v1 and v2 files and decide which one to use.
258
258
259 V1 was used by versions prior to 2.9.1 and contains less data than
259 V1 was used by versions prior to 2.9.1 and contains less data than
260 v2. We read both versions and check that no data in v2 contradicts
260 v2. We read both versions and check that no data in v2 contradicts
261 v1. If there is no contradiction, we can safely assume that both v1
261 v1. If there is no contradiction, we can safely assume that both v1
262 and v2 were written at the same time and use the extra data in v2. If
262 and v2 were written at the same time and use the extra data in v2. If
263 there is a contradiction, we ignore the v2 content, as we assume an old
263 there is a contradiction, we ignore the v2 content, as we assume an old
264 version of Mercurial has overwritten the mergestate file and left an old
264 version of Mercurial has overwritten the mergestate file and left an old
265 v2 file around.
265 v2 file around.
266
266
267 returns list of record [(TYPE, data), ...]"""
267 returns list of record [(TYPE, data), ...]"""
268 v1records = self._readrecordsv1()
268 v1records = self._readrecordsv1()
269 v2records = self._readrecordsv2()
269 v2records = self._readrecordsv2()
270 if self._v1v2match(v1records, v2records):
270 if self._v1v2match(v1records, v2records):
271 return v2records
271 return v2records
272 else:
272 else:
273 # v1 file is newer than v2 file, use it
273 # v1 file is newer than v2 file, use it
274 # we have to infer the "other" changeset of the merge
274 # we have to infer the "other" changeset of the merge
275 # we cannot do better than that with v1 of the format
275 # we cannot do better than that with v1 of the format
276 mctx = self._repo[None].parents()[-1]
276 mctx = self._repo[None].parents()[-1]
277 v1records.append((RECORD_OTHER, mctx.hex()))
277 v1records.append((RECORD_OTHER, mctx.hex()))
278 # add place holder "other" file node information
278 # add place holder "other" file node information
279 # nobody is using it yet so we do not need to fetch the data
279 # nobody is using it yet so we do not need to fetch the data
280 # if mctx was wrong, `mctx[bits[-2]]` may fail.
280 # if mctx was wrong, `mctx[bits[-2]]` may fail.
281 for idx, r in enumerate(v1records):
281 for idx, r in enumerate(v1records):
282 if r[0] == RECORD_MERGED:
282 if r[0] == RECORD_MERGED:
283 bits = r[1].split(b'\0')
283 bits = r[1].split(b'\0')
284 bits.insert(-2, b'')
284 bits.insert(-2, b'')
285 v1records[idx] = (r[0], b'\0'.join(bits))
285 v1records[idx] = (r[0], b'\0'.join(bits))
286 return v1records
286 return v1records
287
287
288 def _v1v2match(self, v1records, v2records):
288 def _v1v2match(self, v1records, v2records):
289 oldv2 = set() # old format version of v2 record
289 oldv2 = set() # old format version of v2 record
290 for rec in v2records:
290 for rec in v2records:
291 if rec[0] == RECORD_LOCAL:
291 if rec[0] == RECORD_LOCAL:
292 oldv2.add(rec)
292 oldv2.add(rec)
293 elif rec[0] == RECORD_MERGED:
293 elif rec[0] == RECORD_MERGED:
294 # drop the onode data (not contained in v1)
294 # drop the onode data (not contained in v1)
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 for rec in v1records:
296 for rec in v1records:
297 if rec not in oldv2:
297 if rec not in oldv2:
298 return False
298 return False
299 else:
299 else:
300 return True
300 return True
301
301
302 def _readrecordsv1(self):
302 def _readrecordsv1(self):
303 """read on disk merge state for version 1 file
303 """read on disk merge state for version 1 file
304
304
305 returns list of record [(TYPE, data), ...]
305 returns list of record [(TYPE, data), ...]
306
306
307 Note: the "F" data from this file are one entry short
307 Note: the "F" data from this file are one entry short
308 (no "other file node" entry)
308 (no "other file node" entry)
309 """
309 """
310 records = []
310 records = []
311 try:
311 try:
312 f = self._repo.vfs(self.statepathv1)
312 f = self._repo.vfs(self.statepathv1)
313 for i, l in enumerate(f):
313 for i, l in enumerate(f):
314 if i == 0:
314 if i == 0:
315 records.append((RECORD_LOCAL, l[:-1]))
315 records.append((RECORD_LOCAL, l[:-1]))
316 else:
316 else:
317 records.append((RECORD_MERGED, l[:-1]))
317 records.append((RECORD_MERGED, l[:-1]))
318 f.close()
318 f.close()
319 except IOError as err:
319 except IOError as err:
320 if err.errno != errno.ENOENT:
320 if err.errno != errno.ENOENT:
321 raise
321 raise
322 return records
322 return records
323
323
324 def _readrecordsv2(self):
324 def _readrecordsv2(self):
325 """read on disk merge state for version 2 file
325 """read on disk merge state for version 2 file
326
326
327 This format is a list of arbitrary records of the form:
327 This format is a list of arbitrary records of the form:
328
328
329 [type][length][content]
329 [type][length][content]
330
330
331 `type` is a single character, `length` is a 4 byte integer, and
331 `type` is a single character, `length` is a 4 byte integer, and
332 `content` is an arbitrary byte sequence of length `length`.
332 `content` is an arbitrary byte sequence of length `length`.
333
333
334 Mercurial versions prior to 3.7 have a bug where if there are
334 Mercurial versions prior to 3.7 have a bug where if there are
335 unsupported mandatory merge records, attempting to clear out the merge
335 unsupported mandatory merge records, attempting to clear out the merge
336 state with hg update --clean or similar aborts. The 't' record type
336 state with hg update --clean or similar aborts. The 't' record type
337 works around that by writing out what those versions treat as an
337 works around that by writing out what those versions treat as an
338 advisory record, but later versions interpret as special: the first
338 advisory record, but later versions interpret as special: the first
339 character is the 'real' record type and everything onwards is the data.
339 character is the 'real' record type and everything onwards is the data.
340
340
341 Returns list of records [(TYPE, data), ...]."""
341 Returns list of records [(TYPE, data), ...]."""
342 records = []
342 records = []
343 try:
343 try:
344 f = self._repo.vfs(self.statepathv2)
344 f = self._repo.vfs(self.statepathv2)
345 data = f.read()
345 data = f.read()
346 off = 0
346 off = 0
347 end = len(data)
347 end = len(data)
348 while off < end:
348 while off < end:
349 rtype = data[off : off + 1]
349 rtype = data[off : off + 1]
350 off += 1
350 off += 1
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
352 off += 4
352 off += 4
353 record = data[off : (off + length)]
353 record = data[off : (off + length)]
354 off += length
354 off += length
355 if rtype == RECORD_OVERRIDE:
355 if rtype == RECORD_OVERRIDE:
356 rtype, record = record[0:1], record[1:]
356 rtype, record = record[0:1], record[1:]
357 records.append((rtype, record))
357 records.append((rtype, record))
358 f.close()
358 f.close()
359 except IOError as err:
359 except IOError as err:
360 if err.errno != errno.ENOENT:
360 if err.errno != errno.ENOENT:
361 raise
361 raise
362 return records
362 return records
363
363
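To make the `[type][length][content]` framing described in `_readrecordsv2` concrete, here is a small standalone sketch (with invented values) that packs one record and parses it back the same way the reader above does:

```python
import struct

# One record: 1-byte type, 4-byte big-endian length, then the payload.
rtype = b'L'
content = b'0123456789abcdef0123456789abcdef01234567'  # made-up 40-char hex node
record = struct.pack(b'>sI%ds' % len(content), rtype, len(content), content)

# Parsing it back, mirroring the loop in _readrecordsv2:
off = 0
t = record[off:off + 1]
off += 1
(length,) = struct.unpack(b'>I', record[off:off + 4])
off += 4
data = record[off:off + length]
assert (t, data) == (rtype, content)
```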
364 @util.propertycache
364 @util.propertycache
365 def mergedriver(self):
365 def mergedriver(self):
366 # protect against the following:
366 # protect against the following:
367 # - A configures a malicious merge driver in their hgrc, then
367 # - A configures a malicious merge driver in their hgrc, then
368 # pauses the merge
368 # pauses the merge
369 # - A edits their hgrc to remove references to the merge driver
369 # - A edits their hgrc to remove references to the merge driver
370 # - A gives a copy of their entire repo, including .hg, to B
370 # - A gives a copy of their entire repo, including .hg, to B
371 # - B inspects .hgrc and finds it to be clean
371 # - B inspects .hgrc and finds it to be clean
372 # - B then continues the merge and the malicious merge driver
372 # - B then continues the merge and the malicious merge driver
373 # gets invoked
373 # gets invoked
374 configmergedriver = self._repo.ui.config(
374 configmergedriver = self._repo.ui.config(
375 b'experimental', b'mergedriver'
375 b'experimental', b'mergedriver'
376 )
376 )
377 if (
377 if (
378 self._readmergedriver is not None
378 self._readmergedriver is not None
379 and self._readmergedriver != configmergedriver
379 and self._readmergedriver != configmergedriver
380 ):
380 ):
381 raise error.ConfigError(
381 raise error.ConfigError(
382 _(b"merge driver changed since merge started"),
382 _(b"merge driver changed since merge started"),
383 hint=_(b"revert merge driver change or abort merge"),
383 hint=_(b"revert merge driver change or abort merge"),
384 )
384 )
385
385
386 return configmergedriver
386 return configmergedriver
387
387
388 @util.propertycache
388 @util.propertycache
389 def localctx(self):
389 def local(self):
390 if self._local is None:
390 if self._local is None:
391 msg = b"localctx accessed but self._local isn't set"
391 msg = b"local accessed but self._local isn't set"
392 raise error.ProgrammingError(msg)
392 raise error.ProgrammingError(msg)
393 return self._repo[self._local]
393 return self._local
394
395 @util.propertycache
396 def localctx(self):
397 return self._repo[self.local]
398
399 @util.propertycache
400 def other(self):
401 if self._other is None:
402 msg = b"other accessed but self._other isn't set"
403 raise error.ProgrammingError(msg)
404 return self._other
394
405
395 @util.propertycache
406 @util.propertycache
396 def otherctx(self):
407 def otherctx(self):
397 if self._other is None:
408 return self._repo[self.other]
398 msg = b"otherctx accessed but self._other isn't set"
399 raise error.ProgrammingError(msg)
400 return self._repo[self._other]
401
409
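A minimal sketch of how the accessors introduced by this change might be used by a caller (hypothetical usage, not part of the patch): `local`/`other` expose the raw nodeids without constructing a changectx, while `localctx`/`otherctx` still return full contexts.

```python
ms = mergestate.read(repo)   # 'repo' assumed to be an existing repository
local_node = ms.local        # raw binary nodeid of the "local" side
other_node = ms.other        # raw binary nodeid of the "other" side
local_ctx = ms.localctx      # equivalent to repo[ms.local]
other_ctx = ms.otherctx      # equivalent to repo[ms.other]
```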
402 def active(self):
410 def active(self):
403 """Whether mergestate is active.
411 """Whether mergestate is active.
404
412
405 Returns True if there appears to be mergestate. This is a rough proxy
413 Returns True if there appears to be mergestate. This is a rough proxy
406 for "is a merge in progress."
414 for "is a merge in progress."
407 """
415 """
408 # Check local variables before looking at filesystem for performance
416 # Check local variables before looking at filesystem for performance
409 # reasons.
417 # reasons.
410 return (
418 return (
411 bool(self._local)
419 bool(self._local)
412 or bool(self._state)
420 or bool(self._state)
413 or self._repo.vfs.exists(self.statepathv1)
421 or self._repo.vfs.exists(self.statepathv1)
414 or self._repo.vfs.exists(self.statepathv2)
422 or self._repo.vfs.exists(self.statepathv2)
415 )
423 )
416
424
417 def commit(self):
425 def commit(self):
418 """Write current state on disk (if necessary)"""
426 """Write current state on disk (if necessary)"""
419 if self._dirty:
427 if self._dirty:
420 records = self._makerecords()
428 records = self._makerecords()
421 self._writerecords(records)
429 self._writerecords(records)
422 self._dirty = False
430 self._dirty = False
423
431
424 def _makerecords(self):
432 def _makerecords(self):
425 records = []
433 records = []
426 records.append((RECORD_LOCAL, hex(self._local)))
434 records.append((RECORD_LOCAL, hex(self._local)))
427 records.append((RECORD_OTHER, hex(self._other)))
435 records.append((RECORD_OTHER, hex(self._other)))
428 if self.mergedriver:
436 if self.mergedriver:
429 records.append(
437 records.append(
430 (
438 (
431 RECORD_MERGE_DRIVER_STATE,
439 RECORD_MERGE_DRIVER_STATE,
432 b'\0'.join([self.mergedriver, self._mdstate]),
440 b'\0'.join([self.mergedriver, self._mdstate]),
433 )
441 )
434 )
442 )
435 # Write out state items. In all cases, the value of the state map entry
443 # Write out state items. In all cases, the value of the state map entry
436 # is written as the contents of the record. The record type depends on
444 # is written as the contents of the record. The record type depends on
437 # the type of state that is stored, and capital-letter records are used
445 # the type of state that is stored, and capital-letter records are used
438 # to prevent older versions of Mercurial that do not support the feature
446 # to prevent older versions of Mercurial that do not support the feature
439 # from loading them.
447 # from loading them.
440 for filename, v in pycompat.iteritems(self._state):
448 for filename, v in pycompat.iteritems(self._state):
441 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
449 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
442 # Driver-resolved merge. These are stored in 'D' records.
450 # Driver-resolved merge. These are stored in 'D' records.
443 records.append(
451 records.append(
444 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
452 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
445 )
453 )
446 elif v[0] in (
454 elif v[0] in (
447 MERGE_RECORD_UNRESOLVED_PATH,
455 MERGE_RECORD_UNRESOLVED_PATH,
448 MERGE_RECORD_RESOLVED_PATH,
456 MERGE_RECORD_RESOLVED_PATH,
449 ):
457 ):
450 # Path conflicts. These are stored in 'P' records. The current
458 # Path conflicts. These are stored in 'P' records. The current
451 # resolution state ('pu' or 'pr') is stored within the record.
459 # resolution state ('pu' or 'pr') is stored within the record.
452 records.append(
460 records.append(
453 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
461 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
454 )
462 )
455 elif v[1] == nullhex or v[6] == nullhex:
463 elif v[1] == nullhex or v[6] == nullhex:
456 # Change/Delete or Delete/Change conflicts. These are stored in
464 # Change/Delete or Delete/Change conflicts. These are stored in
457 # 'C' records. v[1] is the local file, and is nullhex when the
465 # 'C' records. v[1] is the local file, and is nullhex when the
458 # file is deleted locally ('dc'). v[6] is the remote file, and
466 # file is deleted locally ('dc'). v[6] is the remote file, and
459 # is nullhex when the file is deleted remotely ('cd').
467 # is nullhex when the file is deleted remotely ('cd').
460 records.append(
468 records.append(
461 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
469 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
462 )
470 )
463 else:
471 else:
464 # Normal files. These are stored in 'F' records.
472 # Normal files. These are stored in 'F' records.
465 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
473 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
466 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
474 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
467 rawextras = b'\0'.join(
475 rawextras = b'\0'.join(
468 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
476 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
469 )
477 )
470 records.append(
478 records.append(
471 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
479 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
472 )
480 )
473 if self._labels is not None:
481 if self._labels is not None:
474 labels = b'\0'.join(self._labels)
482 labels = b'\0'.join(self._labels)
475 records.append((RECORD_LABELS, labels))
483 records.append((RECORD_LABELS, labels))
476 return records
484 return records
477
485
478 def _writerecords(self, records):
486 def _writerecords(self, records):
479 """Write current state on disk (both v1 and v2)"""
487 """Write current state on disk (both v1 and v2)"""
480 self._writerecordsv1(records)
488 self._writerecordsv1(records)
481 self._writerecordsv2(records)
489 self._writerecordsv2(records)
482
490
483 def _writerecordsv1(self, records):
491 def _writerecordsv1(self, records):
484 """Write current state on disk in a version 1 file"""
492 """Write current state on disk in a version 1 file"""
485 f = self._repo.vfs(self.statepathv1, b'wb')
493 f = self._repo.vfs(self.statepathv1, b'wb')
486 irecords = iter(records)
494 irecords = iter(records)
487 lrecords = next(irecords)
495 lrecords = next(irecords)
488 assert lrecords[0] == RECORD_LOCAL
496 assert lrecords[0] == RECORD_LOCAL
489 f.write(hex(self._local) + b'\n')
497 f.write(hex(self._local) + b'\n')
490 for rtype, data in irecords:
498 for rtype, data in irecords:
491 if rtype == RECORD_MERGED:
499 if rtype == RECORD_MERGED:
492 f.write(b'%s\n' % _droponode(data))
500 f.write(b'%s\n' % _droponode(data))
493 f.close()
501 f.close()
494
502
495 def _writerecordsv2(self, records):
503 def _writerecordsv2(self, records):
496 """Write current state on disk in a version 2 file
504 """Write current state on disk in a version 2 file
497
505
498 See the docstring for _readrecordsv2 for why we use 't'."""
506 See the docstring for _readrecordsv2 for why we use 't'."""
499 # these are the records that all version 2 clients can read
507 # these are the records that all version 2 clients can read
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
508 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 f = self._repo.vfs(self.statepathv2, b'wb')
509 f = self._repo.vfs(self.statepathv2, b'wb')
502 for key, data in records:
510 for key, data in records:
503 assert len(key) == 1
511 assert len(key) == 1
504 if key not in allowlist:
512 if key not in allowlist:
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
513 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 format = b'>sI%is' % len(data)
514 format = b'>sI%is' % len(data)
507 f.write(_pack(format, key, len(data), data))
515 f.write(_pack(format, key, len(data), data))
508 f.close()
516 f.close()
509
517
510 @staticmethod
518 @staticmethod
511 def getlocalkey(path):
519 def getlocalkey(path):
512 """hash the path of a local file context for storage in the .hg/merge
520 """hash the path of a local file context for storage in the .hg/merge
513 directory."""
521 directory."""
514
522
515 return hex(hashutil.sha1(path).digest())
523 return hex(hashutil.sha1(path).digest())
516
524
517 def add(self, fcl, fco, fca, fd):
525 def add(self, fcl, fco, fca, fd):
518 """add a new (potentially?) conflicting file the merge state
526 """add a new (potentially?) conflicting file the merge state
519 fcl: file context for local,
527 fcl: file context for local,
520 fco: file context for remote,
528 fco: file context for remote,
521 fca: file context for ancestors,
529 fca: file context for ancestors,
522 fd: file path of the resulting merge.
530 fd: file path of the resulting merge.
523
531
524 note: also write the local version to the `.hg/merge` directory.
532 note: also write the local version to the `.hg/merge` directory.
525 """
533 """
526 if fcl.isabsent():
534 if fcl.isabsent():
527 localkey = nullhex
535 localkey = nullhex
528 else:
536 else:
529 localkey = mergestate.getlocalkey(fcl.path())
537 localkey = mergestate.getlocalkey(fcl.path())
530 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
538 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
531 self._state[fd] = [
539 self._state[fd] = [
532 MERGE_RECORD_UNRESOLVED,
540 MERGE_RECORD_UNRESOLVED,
533 localkey,
541 localkey,
534 fcl.path(),
542 fcl.path(),
535 fca.path(),
543 fca.path(),
536 hex(fca.filenode()),
544 hex(fca.filenode()),
537 fco.path(),
545 fco.path(),
538 hex(fco.filenode()),
546 hex(fco.filenode()),
539 fcl.flags(),
547 fcl.flags(),
540 ]
548 ]
541 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
549 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
542 self._dirty = True
550 self._dirty = True
543
551
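For illustration, a hypothetical call recording a conflict on a file `b'foo'`; the names `wctx`, `octx` and `actx` (the working, other and ancestor changectxs of the merge) are assumptions made for this example.

```python
# ms.add() stashes the local version under .hg/merge/ and marks b'foo'
# as an unresolved entry in the state map.
ms.add(wctx[b'foo'], octx[b'foo'], actx[b'foo'], b'foo')
```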
544 def addpath(self, path, frename, forigin):
552 def addpath(self, path, frename, forigin):
545 """add a new conflicting path to the merge state
553 """add a new conflicting path to the merge state
546 path: the path that conflicts
554 path: the path that conflicts
547 frename: the filename the conflicting file was renamed to
555 frename: the filename the conflicting file was renamed to
548 forigin: origin of the file ('l' or 'r' for local/remote)
556 forigin: origin of the file ('l' or 'r' for local/remote)
549 """
557 """
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
558 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
551 self._dirty = True
559 self._dirty = True
552
560
553 def __contains__(self, dfile):
561 def __contains__(self, dfile):
554 return dfile in self._state
562 return dfile in self._state
555
563
556 def __getitem__(self, dfile):
564 def __getitem__(self, dfile):
557 return self._state[dfile][0]
565 return self._state[dfile][0]
558
566
559 def __iter__(self):
567 def __iter__(self):
560 return iter(sorted(self._state))
568 return iter(sorted(self._state))
561
569
562 def files(self):
570 def files(self):
563 return self._state.keys()
571 return self._state.keys()
564
572
565 def mark(self, dfile, state):
573 def mark(self, dfile, state):
566 self._state[dfile][0] = state
574 self._state[dfile][0] = state
567 self._dirty = True
575 self._dirty = True
568
576
569 def mdstate(self):
577 def mdstate(self):
570 return self._mdstate
578 return self._mdstate
571
579
572 def unresolved(self):
580 def unresolved(self):
573 """Obtain the paths of unresolved files."""
581 """Obtain the paths of unresolved files."""
574
582
575 for f, entry in pycompat.iteritems(self._state):
583 for f, entry in pycompat.iteritems(self._state):
576 if entry[0] in (
584 if entry[0] in (
577 MERGE_RECORD_UNRESOLVED,
585 MERGE_RECORD_UNRESOLVED,
578 MERGE_RECORD_UNRESOLVED_PATH,
586 MERGE_RECORD_UNRESOLVED_PATH,
579 ):
587 ):
580 yield f
588 yield f
581
589
582 def driverresolved(self):
590 def driverresolved(self):
583 """Obtain the paths of driver-resolved files."""
591 """Obtain the paths of driver-resolved files."""
584
592
585 for f, entry in self._state.items():
593 for f, entry in self._state.items():
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
594 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
587 yield f
595 yield f
588
596
589 def extras(self, filename):
597 def extras(self, filename):
590 return self._stateextras.setdefault(filename, {})
598 return self._stateextras.setdefault(filename, {})
591
599
592 def _resolve(self, preresolve, dfile, wctx):
600 def _resolve(self, preresolve, dfile, wctx):
593 """rerun merge process for file path `dfile`"""
601 """rerun merge process for file path `dfile`"""
594 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
602 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
595 return True, 0
603 return True, 0
596 stateentry = self._state[dfile]
604 stateentry = self._state[dfile]
597 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
605 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
598 octx = self._repo[self._other]
606 octx = self._repo[self._other]
599 extras = self.extras(dfile)
607 extras = self.extras(dfile)
600 anccommitnode = extras.get(b'ancestorlinknode')
608 anccommitnode = extras.get(b'ancestorlinknode')
601 if anccommitnode:
609 if anccommitnode:
602 actx = self._repo[anccommitnode]
610 actx = self._repo[anccommitnode]
603 else:
611 else:
604 actx = None
612 actx = None
605 fcd = self._filectxorabsent(localkey, wctx, dfile)
613 fcd = self._filectxorabsent(localkey, wctx, dfile)
606 fco = self._filectxorabsent(onode, octx, ofile)
614 fco = self._filectxorabsent(onode, octx, ofile)
607 # TODO: move this to filectxorabsent
615 # TODO: move this to filectxorabsent
608 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
616 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
609 # "premerge" x flags
617 # "premerge" x flags
610 flo = fco.flags()
618 flo = fco.flags()
611 fla = fca.flags()
619 fla = fca.flags()
612 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
620 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
613 if fca.node() == nullid and flags != flo:
621 if fca.node() == nullid and flags != flo:
614 if preresolve:
622 if preresolve:
615 self._repo.ui.warn(
623 self._repo.ui.warn(
616 _(
624 _(
617 b'warning: cannot merge flags for %s '
625 b'warning: cannot merge flags for %s '
618 b'without common ancestor - keeping local flags\n'
626 b'without common ancestor - keeping local flags\n'
619 )
627 )
620 % afile
628 % afile
621 )
629 )
622 elif flags == fla:
630 elif flags == fla:
623 flags = flo
631 flags = flo
624 if preresolve:
632 if preresolve:
625 # restore local
633 # restore local
626 if localkey != nullhex:
634 if localkey != nullhex:
627 f = self._repo.vfs(b'merge/' + localkey)
635 f = self._repo.vfs(b'merge/' + localkey)
628 wctx[dfile].write(f.read(), flags)
636 wctx[dfile].write(f.read(), flags)
629 f.close()
637 f.close()
630 else:
638 else:
631 wctx[dfile].remove(ignoremissing=True)
639 wctx[dfile].remove(ignoremissing=True)
632 complete, r, deleted = filemerge.premerge(
640 complete, r, deleted = filemerge.premerge(
633 self._repo,
641 self._repo,
634 wctx,
642 wctx,
635 self._local,
643 self._local,
636 lfile,
644 lfile,
637 fcd,
645 fcd,
638 fco,
646 fco,
639 fca,
647 fca,
640 labels=self._labels,
648 labels=self._labels,
641 )
649 )
642 else:
650 else:
643 complete, r, deleted = filemerge.filemerge(
651 complete, r, deleted = filemerge.filemerge(
644 self._repo,
652 self._repo,
645 wctx,
653 wctx,
646 self._local,
654 self._local,
647 lfile,
655 lfile,
648 fcd,
656 fcd,
649 fco,
657 fco,
650 fca,
658 fca,
651 labels=self._labels,
659 labels=self._labels,
652 )
660 )
653 if r is None:
661 if r is None:
654 # no real conflict
662 # no real conflict
655 del self._state[dfile]
663 del self._state[dfile]
656 self._stateextras.pop(dfile, None)
664 self._stateextras.pop(dfile, None)
657 self._dirty = True
665 self._dirty = True
658 elif not r:
666 elif not r:
659 self.mark(dfile, MERGE_RECORD_RESOLVED)
667 self.mark(dfile, MERGE_RECORD_RESOLVED)
660
668
661 if complete:
669 if complete:
662 action = None
670 action = None
663 if deleted:
671 if deleted:
664 if fcd.isabsent():
672 if fcd.isabsent():
665 # dc: local picked. Need to drop if present, which may
673 # dc: local picked. Need to drop if present, which may
666 # happen on re-resolves.
674 # happen on re-resolves.
667 action = ACTION_FORGET
675 action = ACTION_FORGET
668 else:
676 else:
669 # cd: remote picked (or otherwise deleted)
677 # cd: remote picked (or otherwise deleted)
670 action = ACTION_REMOVE
678 action = ACTION_REMOVE
671 else:
679 else:
672 if fcd.isabsent(): # dc: remote picked
680 if fcd.isabsent(): # dc: remote picked
673 action = ACTION_GET
681 action = ACTION_GET
674 elif fco.isabsent(): # cd: local picked
682 elif fco.isabsent(): # cd: local picked
675 if dfile in self.localctx:
683 if dfile in self.localctx:
676 action = ACTION_ADD_MODIFIED
684 action = ACTION_ADD_MODIFIED
677 else:
685 else:
678 action = ACTION_ADD
686 action = ACTION_ADD
679 # else: regular merges (no action necessary)
687 # else: regular merges (no action necessary)
680 self._results[dfile] = r, action
688 self._results[dfile] = r, action
681
689
682 return complete, r
690 return complete, r
683
691
684 def _filectxorabsent(self, hexnode, ctx, f):
692 def _filectxorabsent(self, hexnode, ctx, f):
685 if hexnode == nullhex:
693 if hexnode == nullhex:
686 return filemerge.absentfilectx(ctx, f)
694 return filemerge.absentfilectx(ctx, f)
687 else:
695 else:
688 return ctx[f]
696 return ctx[f]
689
697
690 def preresolve(self, dfile, wctx):
698 def preresolve(self, dfile, wctx):
691 """run premerge process for dfile
699 """run premerge process for dfile
692
700
693 Returns whether the merge is complete, and the exit code."""
701 Returns whether the merge is complete, and the exit code."""
694 return self._resolve(True, dfile, wctx)
702 return self._resolve(True, dfile, wctx)
695
703
696 def resolve(self, dfile, wctx):
704 def resolve(self, dfile, wctx):
697 """run merge process (assuming premerge was run) for dfile
705 """run merge process (assuming premerge was run) for dfile
698
706
699 Returns the exit code of the merge."""
707 Returns the exit code of the merge."""
700 return self._resolve(False, dfile, wctx)[1]
708 return self._resolve(False, dfile, wctx)[1]
701
709
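Taken together, `preresolve` and `resolve` are typically driven in a loop like the following sketch (hypothetical caller, with `wctx` assumed to be the working context):

```python
for f in list(ms.unresolved()):
    complete, r = ms.preresolve(f, wctx)   # run the premerge step
    if not complete:
        r = ms.resolve(f, wctx)            # run the actual file merge
ms.commit()                                # persist any state changes
```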
702 def counts(self):
710 def counts(self):
703 """return counts for updated, merged and removed files in this
711 """return counts for updated, merged and removed files in this
704 session"""
712 session"""
705 updated, merged, removed = 0, 0, 0
713 updated, merged, removed = 0, 0, 0
706 for r, action in pycompat.itervalues(self._results):
714 for r, action in pycompat.itervalues(self._results):
707 if r is None:
715 if r is None:
708 updated += 1
716 updated += 1
709 elif r == 0:
717 elif r == 0:
710 if action == ACTION_REMOVE:
718 if action == ACTION_REMOVE:
711 removed += 1
719 removed += 1
712 else:
720 else:
713 merged += 1
721 merged += 1
714 return updated, merged, removed
722 return updated, merged, removed
715
723
716 def unresolvedcount(self):
724 def unresolvedcount(self):
717 """get unresolved count for this merge (persistent)"""
725 """get unresolved count for this merge (persistent)"""
718 return len(list(self.unresolved()))
726 return len(list(self.unresolved()))
719
727
720 def actions(self):
728 def actions(self):
721 """return lists of actions to perform on the dirstate"""
729 """return lists of actions to perform on the dirstate"""
722 actions = {
730 actions = {
723 ACTION_REMOVE: [],
731 ACTION_REMOVE: [],
724 ACTION_FORGET: [],
732 ACTION_FORGET: [],
725 ACTION_ADD: [],
733 ACTION_ADD: [],
726 ACTION_ADD_MODIFIED: [],
734 ACTION_ADD_MODIFIED: [],
727 ACTION_GET: [],
735 ACTION_GET: [],
728 }
736 }
729 for f, (r, action) in pycompat.iteritems(self._results):
737 for f, (r, action) in pycompat.iteritems(self._results):
730 if action is not None:
738 if action is not None:
731 actions[action].append((f, None, b"merge result"))
739 actions[action].append((f, None, b"merge result"))
732 return actions
740 return actions
733
741
734 def recordactions(self):
742 def recordactions(self):
735 """record remove/add/get actions in the dirstate"""
743 """record remove/add/get actions in the dirstate"""
736 branchmerge = self._repo.dirstate.p2() != nullid
744 branchmerge = self._repo.dirstate.p2() != nullid
737 recordupdates(self._repo, self.actions(), branchmerge, None)
745 recordupdates(self._repo, self.actions(), branchmerge, None)
738
746
739 def queueremove(self, f):
747 def queueremove(self, f):
740 """queues a file to be removed from the dirstate
748 """queues a file to be removed from the dirstate
741
749
742 Meant for use by custom merge drivers."""
750 Meant for use by custom merge drivers."""
743 self._results[f] = 0, ACTION_REMOVE
751 self._results[f] = 0, ACTION_REMOVE
744
752
745 def queueadd(self, f):
753 def queueadd(self, f):
746 """queues a file to be added to the dirstate
754 """queues a file to be added to the dirstate
747
755
748 Meant for use by custom merge drivers."""
756 Meant for use by custom merge drivers."""
749 self._results[f] = 0, ACTION_ADD
757 self._results[f] = 0, ACTION_ADD
750
758
751 def queueget(self, f):
759 def queueget(self, f):
752 """queues a file to be marked modified in the dirstate
760 """queues a file to be marked modified in the dirstate
753
761
754 Meant for use by custom merge drivers."""
762 Meant for use by custom merge drivers."""
755 self._results[f] = 0, ACTION_GET
763 self._results[f] = 0, ACTION_GET
756
764
757
765
758 def _getcheckunknownconfig(repo, section, name):
766 def _getcheckunknownconfig(repo, section, name):
759 config = repo.ui.config(section, name)
767 config = repo.ui.config(section, name)
760 valid = [b'abort', b'ignore', b'warn']
768 valid = [b'abort', b'ignore', b'warn']
761 if config not in valid:
769 if config not in valid:
762 validstr = b', '.join([b"'" + v + b"'" for v in valid])
770 validstr = b', '.join([b"'" + v + b"'" for v in valid])
763 raise error.ConfigError(
771 raise error.ConfigError(
764 _(b"%s.%s not valid ('%s' is none of %s)")
772 _(b"%s.%s not valid ('%s' is none of %s)")
765 % (section, name, config, validstr)
773 % (section, name, config, validstr)
766 )
774 )
767 return config
775 return config
768
776
769
777
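The helper above accepts only three values; for illustration, these are the two configuration keys it is used with (the same calls appear in `_checkunknownfiles` below):

```python
# Both keys accept b'abort', b'ignore' or b'warn'; anything else raises
# a ConfigError in _getcheckunknownconfig above.
unknown = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
ignored = _getcheckunknownconfig(repo, b'merge', b'checkignored')
```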
770 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
778 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
771 if wctx.isinmemory():
779 if wctx.isinmemory():
772 # Nothing to do in IMM because nothing in the "working copy" can be an
780 # Nothing to do in IMM because nothing in the "working copy" can be an
773 # unknown file.
781 # unknown file.
774 #
782 #
775 # Note that we should bail out here, not in ``_checkunknownfiles()``,
783 # Note that we should bail out here, not in ``_checkunknownfiles()``,
776 # because that function does other useful work.
784 # because that function does other useful work.
777 return False
785 return False
778
786
779 if f2 is None:
787 if f2 is None:
780 f2 = f
788 f2 = f
781 return (
789 return (
782 repo.wvfs.audit.check(f)
790 repo.wvfs.audit.check(f)
783 and repo.wvfs.isfileorlink(f)
791 and repo.wvfs.isfileorlink(f)
784 and repo.dirstate.normalize(f) not in repo.dirstate
792 and repo.dirstate.normalize(f) not in repo.dirstate
785 and mctx[f2].cmp(wctx[f])
793 and mctx[f2].cmp(wctx[f])
786 )
794 )
787
795
788
796
789 class _unknowndirschecker(object):
797 class _unknowndirschecker(object):
790 """
798 """
791 Look for any unknown files or directories that may have a path conflict
799 Look for any unknown files or directories that may have a path conflict
792 with a file. If any path prefix of the file exists as a file or link,
800 with a file. If any path prefix of the file exists as a file or link,
793 then it conflicts. If the file itself is a directory that contains any
801 then it conflicts. If the file itself is a directory that contains any
794 file that is not tracked, then it conflicts.
802 file that is not tracked, then it conflicts.
795
803
796 Returns the shortest path at which a conflict occurs, or None if there is
804 Returns the shortest path at which a conflict occurs, or None if there is
797 no conflict.
805 no conflict.
798 """
806 """
799
807
800 def __init__(self):
808 def __init__(self):
801 # A set of paths known to be good. This prevents repeated checking of
809 # A set of paths known to be good. This prevents repeated checking of
802 # dirs. It will be updated with any new dirs that are checked and found
810 # dirs. It will be updated with any new dirs that are checked and found
803 # to be safe.
811 # to be safe.
804 self._unknowndircache = set()
812 self._unknowndircache = set()
805
813
806 # A set of paths that are known to be absent. This prevents repeated
814 # A set of paths that are known to be absent. This prevents repeated
807 # checking of subdirectories that are known not to exist. It will be
815 # checking of subdirectories that are known not to exist. It will be
808 # updated with any new dirs that are checked and found to be absent.
816 # updated with any new dirs that are checked and found to be absent.
809 self._missingdircache = set()
817 self._missingdircache = set()
810
818
811 def __call__(self, repo, wctx, f):
819 def __call__(self, repo, wctx, f):
812 if wctx.isinmemory():
820 if wctx.isinmemory():
813 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
821 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
814 return False
822 return False
815
823
816 # Check for path prefixes that exist as unknown files.
824 # Check for path prefixes that exist as unknown files.
817 for p in reversed(list(pathutil.finddirs(f))):
825 for p in reversed(list(pathutil.finddirs(f))):
818 if p in self._missingdircache:
826 if p in self._missingdircache:
819 return
827 return
820 if p in self._unknowndircache:
828 if p in self._unknowndircache:
821 continue
829 continue
822 if repo.wvfs.audit.check(p):
830 if repo.wvfs.audit.check(p):
823 if (
831 if (
824 repo.wvfs.isfileorlink(p)
832 repo.wvfs.isfileorlink(p)
825 and repo.dirstate.normalize(p) not in repo.dirstate
833 and repo.dirstate.normalize(p) not in repo.dirstate
826 ):
834 ):
827 return p
835 return p
828 if not repo.wvfs.lexists(p):
836 if not repo.wvfs.lexists(p):
829 self._missingdircache.add(p)
837 self._missingdircache.add(p)
830 return
838 return
831 self._unknowndircache.add(p)
839 self._unknowndircache.add(p)
832
840
833 # Check if the file conflicts with a directory containing unknown files.
841 # Check if the file conflicts with a directory containing unknown files.
834 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
842 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
835 # Does the directory contain any files that are not in the dirstate?
843 # Does the directory contain any files that are not in the dirstate?
836 for p, dirs, files in repo.wvfs.walk(f):
844 for p, dirs, files in repo.wvfs.walk(f):
837 for fn in files:
845 for fn in files:
838 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
846 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
839 relf = repo.dirstate.normalize(relf, isknown=True)
847 relf = repo.dirstate.normalize(relf, isknown=True)
840 if relf not in repo.dirstate:
848 if relf not in repo.dirstate:
841 return f
849 return f
842 return None
850 return None
843
851
844
852
845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
853 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
846 """
854 """
847 Considers any actions that care about the presence of conflicting unknown
855 Considers any actions that care about the presence of conflicting unknown
848 files. For some actions, the result is to abort; for others, it is to
856 files. For some actions, the result is to abort; for others, it is to
849 choose a different action.
857 choose a different action.
850 """
858 """
851 fileconflicts = set()
859 fileconflicts = set()
852 pathconflicts = set()
860 pathconflicts = set()
853 warnconflicts = set()
861 warnconflicts = set()
854 abortconflicts = set()
862 abortconflicts = set()
855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
863 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
864 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
857 pathconfig = repo.ui.configbool(
865 pathconfig = repo.ui.configbool(
858 b'experimental', b'merge.checkpathconflicts'
866 b'experimental', b'merge.checkpathconflicts'
859 )
867 )
860 if not force:
868 if not force:
861
869
862 def collectconflicts(conflicts, config):
870 def collectconflicts(conflicts, config):
863 if config == b'abort':
871 if config == b'abort':
864 abortconflicts.update(conflicts)
872 abortconflicts.update(conflicts)
865 elif config == b'warn':
873 elif config == b'warn':
866 warnconflicts.update(conflicts)
874 warnconflicts.update(conflicts)
867
875
868 checkunknowndirs = _unknowndirschecker()
876 checkunknowndirs = _unknowndirschecker()
869 for f, (m, args, msg) in pycompat.iteritems(actions):
877 for f, (m, args, msg) in pycompat.iteritems(actions):
870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
878 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
871 if _checkunknownfile(repo, wctx, mctx, f):
879 if _checkunknownfile(repo, wctx, mctx, f):
872 fileconflicts.add(f)
880 fileconflicts.add(f)
873 elif pathconfig and f not in wctx:
881 elif pathconfig and f not in wctx:
874 path = checkunknowndirs(repo, wctx, f)
882 path = checkunknowndirs(repo, wctx, f)
875 if path is not None:
883 if path is not None:
876 pathconflicts.add(path)
884 pathconflicts.add(path)
877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
885 elif m == ACTION_LOCAL_DIR_RENAME_GET:
878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
886 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
879 fileconflicts.add(f)
887 fileconflicts.add(f)
880
888
881 allconflicts = fileconflicts | pathconflicts
889 allconflicts = fileconflicts | pathconflicts
882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
890 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
883 unknownconflicts = allconflicts - ignoredconflicts
891 unknownconflicts = allconflicts - ignoredconflicts
884 collectconflicts(ignoredconflicts, ignoredconfig)
892 collectconflicts(ignoredconflicts, ignoredconfig)
885 collectconflicts(unknownconflicts, unknownconfig)
893 collectconflicts(unknownconflicts, unknownconfig)
886 else:
894 else:
887 for f, (m, args, msg) in pycompat.iteritems(actions):
895 for f, (m, args, msg) in pycompat.iteritems(actions):
888 if m == ACTION_CREATED_MERGE:
896 if m == ACTION_CREATED_MERGE:
889 fl2, anc = args
897 fl2, anc = args
890 different = _checkunknownfile(repo, wctx, mctx, f)
898 different = _checkunknownfile(repo, wctx, mctx, f)
891 if repo.dirstate._ignore(f):
899 if repo.dirstate._ignore(f):
892 config = ignoredconfig
900 config = ignoredconfig
893 else:
901 else:
894 config = unknownconfig
902 config = unknownconfig
895
903
896 # The behavior when force is True is described by this table:
904 # The behavior when force is True is described by this table:
897 # config different mergeforce | action backup
905 # config different mergeforce | action backup
898 # * n * | get n
906 # * n * | get n
899 # * y y | merge -
907 # * y y | merge -
900 # abort y n | merge - (1)
908 # abort y n | merge - (1)
901 # warn y n | warn + get y
909 # warn y n | warn + get y
902 # ignore y n | get y
910 # ignore y n | get y
903 #
911 #
904 # (1) this is probably the wrong behavior here -- we should
912 # (1) this is probably the wrong behavior here -- we should
905 # probably abort, but some actions like rebases currently
913 # probably abort, but some actions like rebases currently
906 # don't like an abort happening in the middle of
914 # don't like an abort happening in the middle of
907 # merge.update.
915 # merge.update.
908 if not different:
916 if not different:
909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
917 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
910 elif mergeforce or config == b'abort':
918 elif mergeforce or config == b'abort':
911 actions[f] = (
919 actions[f] = (
912 ACTION_MERGE,
920 ACTION_MERGE,
913 (f, f, None, False, anc),
921 (f, f, None, False, anc),
914 b'remote differs from untracked local',
922 b'remote differs from untracked local',
915 )
923 )
916 elif config == b'abort':
924 elif config == b'abort':
917 abortconflicts.add(f)
925 abortconflicts.add(f)
918 else:
926 else:
919 if config == b'warn':
927 if config == b'warn':
920 warnconflicts.add(f)
928 warnconflicts.add(f)
921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
929 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
922
930
923 for f in sorted(abortconflicts):
931 for f in sorted(abortconflicts):
924 warn = repo.ui.warn
932 warn = repo.ui.warn
925 if f in pathconflicts:
933 if f in pathconflicts:
926 if repo.wvfs.isfileorlink(f):
934 if repo.wvfs.isfileorlink(f):
927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
935 warn(_(b"%s: untracked file conflicts with directory\n") % f)
928 else:
936 else:
929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
937 warn(_(b"%s: untracked directory conflicts with file\n") % f)
930 else:
938 else:
931 warn(_(b"%s: untracked file differs\n") % f)
939 warn(_(b"%s: untracked file differs\n") % f)
932 if abortconflicts:
940 if abortconflicts:
933 raise error.Abort(
941 raise error.Abort(
934 _(
942 _(
935 b"untracked files in working directory "
943 b"untracked files in working directory "
936 b"differ from files in requested revision"
944 b"differ from files in requested revision"
937 )
945 )
938 )
946 )
939
947
940 for f in sorted(warnconflicts):
948 for f in sorted(warnconflicts):
941 if repo.wvfs.isfileorlink(f):
949 if repo.wvfs.isfileorlink(f):
942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
950 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
943 else:
951 else:
944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
952 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
945
953
946 for f, (m, args, msg) in pycompat.iteritems(actions):
954 for f, (m, args, msg) in pycompat.iteritems(actions):
947 if m == ACTION_CREATED:
955 if m == ACTION_CREATED:
948 backup = (
956 backup = (
949 f in fileconflicts
957 f in fileconflicts
950 or f in pathconflicts
958 or f in pathconflicts
951 or any(p in pathconflicts for p in pathutil.finddirs(f))
959 or any(p in pathconflicts for p in pathutil.finddirs(f))
952 )
960 )
953 (flags,) = args
961 (flags,) = args
954 actions[f] = (ACTION_GET, (flags, backup), msg)
962 actions[f] = (ACTION_GET, (flags, backup), msg)
955
963
956
964
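# The force=True decision table in _checkunknownfiles() above can be read as a
# small pure function. The sketch below is illustrative only: it uses plain
# strings instead of Mercurial's action tuples, and the function name and
# return values are assumptions chosen for readability.
def _force_update_decision(config, different, mergeforce):
    """Pick (action, backup) for an untracked file when force is in effect.

    >>> _force_update_decision(b'warn', different=False, mergeforce=False)
    ('get', False)
    >>> _force_update_decision(b'abort', different=True, mergeforce=False)
    ('merge', None)
    >>> _force_update_decision(b'warn', different=True, mergeforce=False)
    ('warn+get', True)
    """
    if not different:
        return 'get', False  # untracked file already matches the remote content
    if mergeforce or config == b'abort':
        return 'merge', None  # see note (1) in the table above
    if config == b'warn':
        return 'warn+get', True  # warn, then overwrite with a backup
    return 'get', True  # 'ignore': silently overwrite, keeping a backup
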
957 def _forgetremoved(wctx, mctx, branchmerge):
965 def _forgetremoved(wctx, mctx, branchmerge):
958 """
966 """
959 Forget removed files
967 Forget removed files
960
968
961 If we're jumping between revisions (as opposed to merging), and if
969 If we're jumping between revisions (as opposed to merging), and if
962 neither the working directory nor the target rev has the file,
970 neither the working directory nor the target rev has the file,
963 then we need to remove it from the dirstate, to prevent the
971 then we need to remove it from the dirstate, to prevent the
964 dirstate from listing the file when it is no longer in the
972 dirstate from listing the file when it is no longer in the
965 manifest.
973 manifest.
966
974
967 If we're merging, and the other revision has removed a file
975 If we're merging, and the other revision has removed a file
968 that is not present in the working directory, we need to mark it
976 that is not present in the working directory, we need to mark it
969 as removed.
977 as removed.
970 """
978 """
971
979
972 actions = {}
980 actions = {}
973 m = ACTION_FORGET
981 m = ACTION_FORGET
974 if branchmerge:
982 if branchmerge:
975 m = ACTION_REMOVE
983 m = ACTION_REMOVE
976 for f in wctx.deleted():
984 for f in wctx.deleted():
977 if f not in mctx:
985 if f not in mctx:
978 actions[f] = m, None, b"forget deleted"
986 actions[f] = m, None, b"forget deleted"
979
987
980 if not branchmerge:
988 if not branchmerge:
981 for f in wctx.removed():
989 for f in wctx.removed():
982 if f not in mctx:
990 if f not in mctx:
983 actions[f] = ACTION_FORGET, None, b"forget removed"
991 actions[f] = ACTION_FORGET, None, b"forget removed"
984
992
985 return actions
993 return actions
986
994
987
995
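# A minimal sketch of the rule documented in the docstring above, restated
# over plain sets instead of contexts. The function and argument names are
# assumptions for illustration; 'f' and 'r' mirror the forget/remove action
# codes used elsewhere in this module.
def _forgetremoved_sketch(deleted, removed, target_files, branchmerge):
    """
    >>> _forgetremoved_sketch({b'x'}, {b'y'}, set(), branchmerge=False) == {
    ...     b'x': 'f', b'y': 'f'}
    True
    """
    actions = {}
    code = 'r' if branchmerge else 'f'  # merging marks as removed, update forgets
    for f in deleted:
        if f not in target_files:
            actions[f] = code
    if not branchmerge:
        for f in removed:
            if f not in target_files:
                actions[f] = code
    return actions
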
988 def _checkcollision(repo, wmf, actions):
996 def _checkcollision(repo, wmf, actions):
989 """
997 """
990 Check for case-folding collisions.
998 Check for case-folding collisions.
991 """
999 """
992
1000
993 # If the repo is narrowed, filter out files outside the narrowspec.
1001 # If the repo is narrowed, filter out files outside the narrowspec.
994 narrowmatch = repo.narrowmatch()
1002 narrowmatch = repo.narrowmatch()
995 if not narrowmatch.always():
1003 if not narrowmatch.always():
996 wmf = wmf.matches(narrowmatch)
1004 wmf = wmf.matches(narrowmatch)
997 if actions:
1005 if actions:
998 narrowactions = {}
1006 narrowactions = {}
999 for m, actionsfortype in pycompat.iteritems(actions):
1007 for m, actionsfortype in pycompat.iteritems(actions):
1000 narrowactions[m] = []
1008 narrowactions[m] = []
1001 for (f, args, msg) in actionsfortype:
1009 for (f, args, msg) in actionsfortype:
1002 if narrowmatch(f):
1010 if narrowmatch(f):
1003 narrowactions[m].append((f, args, msg))
1011 narrowactions[m].append((f, args, msg))
1004 actions = narrowactions
1012 actions = narrowactions
1005
1013
1006 # build provisional merged manifest up
1014 # build provisional merged manifest up
1007 pmmf = set(wmf)
1015 pmmf = set(wmf)
1008
1016
1009 if actions:
1017 if actions:
1010 # KEEP and EXEC are no-ops
1018 # KEEP and EXEC are no-ops
1011 for m in (
1019 for m in (
1012 ACTION_ADD,
1020 ACTION_ADD,
1013 ACTION_ADD_MODIFIED,
1021 ACTION_ADD_MODIFIED,
1014 ACTION_FORGET,
1022 ACTION_FORGET,
1015 ACTION_GET,
1023 ACTION_GET,
1016 ACTION_CHANGED_DELETED,
1024 ACTION_CHANGED_DELETED,
1017 ACTION_DELETED_CHANGED,
1025 ACTION_DELETED_CHANGED,
1018 ):
1026 ):
1019 for f, args, msg in actions[m]:
1027 for f, args, msg in actions[m]:
1020 pmmf.add(f)
1028 pmmf.add(f)
1021 for f, args, msg in actions[ACTION_REMOVE]:
1029 for f, args, msg in actions[ACTION_REMOVE]:
1022 pmmf.discard(f)
1030 pmmf.discard(f)
1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1031 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1024 f2, flags = args
1032 f2, flags = args
1025 pmmf.discard(f2)
1033 pmmf.discard(f2)
1026 pmmf.add(f)
1034 pmmf.add(f)
1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1035 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1028 pmmf.add(f)
1036 pmmf.add(f)
1029 for f, args, msg in actions[ACTION_MERGE]:
1037 for f, args, msg in actions[ACTION_MERGE]:
1030 f1, f2, fa, move, anc = args
1038 f1, f2, fa, move, anc = args
1031 if move:
1039 if move:
1032 pmmf.discard(f1)
1040 pmmf.discard(f1)
1033 pmmf.add(f)
1041 pmmf.add(f)
1034
1042
1035 # check case-folding collision in provisional merged manifest
1043 # check case-folding collision in provisional merged manifest
1036 foldmap = {}
1044 foldmap = {}
1037 for f in pmmf:
1045 for f in pmmf:
1038 fold = util.normcase(f)
1046 fold = util.normcase(f)
1039 if fold in foldmap:
1047 if fold in foldmap:
1040 raise error.Abort(
1048 raise error.Abort(
1041 _(b"case-folding collision between %s and %s")
1049 _(b"case-folding collision between %s and %s")
1042 % (f, foldmap[fold])
1050 % (f, foldmap[fold])
1043 )
1051 )
1044 foldmap[fold] = f
1052 foldmap[fold] = f
1045
1053
1046 # check case-folding of directories
1054 # check case-folding of directories
1047 foldprefix = unfoldprefix = lastfull = b''
1055 foldprefix = unfoldprefix = lastfull = b''
1048 for fold, f in sorted(foldmap.items()):
1056 for fold, f in sorted(foldmap.items()):
1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1057 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1050 # the folded prefix matches but actual casing is different
1058 # the folded prefix matches but actual casing is different
1051 raise error.Abort(
1059 raise error.Abort(
1052 _(b"case-folding collision between %s and directory of %s")
1060 _(b"case-folding collision between %s and directory of %s")
1053 % (lastfull, f)
1061 % (lastfull, f)
1054 )
1062 )
1055 foldprefix = fold + b'/'
1063 foldprefix = fold + b'/'
1056 unfoldprefix = f + b'/'
1064 unfoldprefix = f + b'/'
1057 lastfull = f
1065 lastfull = f
1058
1066
1059
1067
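# The core of the case-folding check in _checkcollision() above reduces to a
# fold map keyed on the normalized name. A self-contained sketch, assuming
# bytes.lower() as a stand-in for util.normcase (the real helper is
# platform-aware); everything else here is illustrative.
def _casefold_collisions(paths):
    """Return (path, earlier_path) pairs that collide case-insensitively.

    >>> _casefold_collisions([b'README', b'readme', b'src/a'])
    [(b'readme', b'README')]
    """
    foldmap = {}
    collisions = []
    for f in paths:
        fold = f.lower()  # approximation of util.normcase(f)
        if fold in foldmap:
            collisions.append((f, foldmap[fold]))
        else:
            foldmap[fold] = f
    return collisions
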
1060 def driverpreprocess(repo, ms, wctx, labels=None):
1068 def driverpreprocess(repo, ms, wctx, labels=None):
1061 """run the preprocess step of the merge driver, if any
1069 """run the preprocess step of the merge driver, if any
1062
1070
1063 This is currently not implemented -- it's an extension point."""
1071 This is currently not implemented -- it's an extension point."""
1064 return True
1072 return True
1065
1073
1066
1074
1067 def driverconclude(repo, ms, wctx, labels=None):
1075 def driverconclude(repo, ms, wctx, labels=None):
1068 """run the conclude step of the merge driver, if any
1076 """run the conclude step of the merge driver, if any
1069
1077
1070 This is currently not implemented -- it's an extension point."""
1078 This is currently not implemented -- it's an extension point."""
1071 return True
1079 return True
1072
1080
1073
1081
1074 def _filesindirs(repo, manifest, dirs):
1082 def _filesindirs(repo, manifest, dirs):
1075 """
1083 """
1076 Generator that yields pairs of all the files in the manifest that are found
1084 Generator that yields pairs of all the files in the manifest that are found
1077 inside the directories listed in dirs, and which directory they are found
1085 inside the directories listed in dirs, and which directory they are found
1078 in.
1086 in.
1079 """
1087 """
1080 for f in manifest:
1088 for f in manifest:
1081 for p in pathutil.finddirs(f):
1089 for p in pathutil.finddirs(f):
1082 if p in dirs:
1090 if p in dirs:
1083 yield f, p
1091 yield f, p
1084 break
1092 break
1085
1093
1086
1094
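# Roughly equivalent behaviour over plain byte strings, for illustration only
# (the real generator walks pathutil.finddirs over a manifest object):
def _filesindirs_sketch(files, dirs):
    """Yield (file, dir) for every file that lives under one of ``dirs``.

    >>> sorted(_filesindirs_sketch([b'a/b/c', b'd/e', b'f'], {b'a', b'd/e'}))
    [(b'a/b/c', b'a')]
    """
    for f in files:
        parts = f.split(b'/')[:-1]
        for i in range(len(parts), 0, -1):  # deepest parent directory first
            d = b'/'.join(parts[:i])
            if d in dirs:
                yield f, d
                break
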
1087 def checkpathconflicts(repo, wctx, mctx, actions):
1095 def checkpathconflicts(repo, wctx, mctx, actions):
1088 """
1096 """
1089 Check if any actions introduce path conflicts in the repository, updating
1097 Check if any actions introduce path conflicts in the repository, updating
1090 actions to record or handle the path conflict accordingly.
1098 actions to record or handle the path conflict accordingly.
1091 """
1099 """
1092 mf = wctx.manifest()
1100 mf = wctx.manifest()
1093
1101
1094 # The set of local files that conflict with a remote directory.
1102 # The set of local files that conflict with a remote directory.
1095 localconflicts = set()
1103 localconflicts = set()
1096
1104
1097 # The set of directories that conflict with a remote file, and so may cause
1105 # The set of directories that conflict with a remote file, and so may cause
1098 # conflicts if they still contain any files after the merge.
1106 # conflicts if they still contain any files after the merge.
1099 remoteconflicts = set()
1107 remoteconflicts = set()
1100
1108
1101 # The set of directories that appear as both a file and a directory in the
1109 # The set of directories that appear as both a file and a directory in the
1102 # remote manifest. These indicate an invalid remote manifest, which
1110 # remote manifest. These indicate an invalid remote manifest, which
1103 # can't be cleanly updated to.
1111 # can't be cleanly updated to.
1104 invalidconflicts = set()
1112 invalidconflicts = set()
1105
1113
1106 # The set of directories that contain files that are being created.
1114 # The set of directories that contain files that are being created.
1107 createdfiledirs = set()
1115 createdfiledirs = set()
1108
1116
1109 # The set of files deleted by all the actions.
1117 # The set of files deleted by all the actions.
1110 deletedfiles = set()
1118 deletedfiles = set()
1111
1119
1112 for f, (m, args, msg) in actions.items():
1120 for f, (m, args, msg) in actions.items():
1113 if m in (
1121 if m in (
1114 ACTION_CREATED,
1122 ACTION_CREATED,
1115 ACTION_DELETED_CHANGED,
1123 ACTION_DELETED_CHANGED,
1116 ACTION_MERGE,
1124 ACTION_MERGE,
1117 ACTION_CREATED_MERGE,
1125 ACTION_CREATED_MERGE,
1118 ):
1126 ):
1119 # This action may create a new local file.
1127 # This action may create a new local file.
1120 createdfiledirs.update(pathutil.finddirs(f))
1128 createdfiledirs.update(pathutil.finddirs(f))
1121 if mf.hasdir(f):
1129 if mf.hasdir(f):
1122 # The file aliases a local directory. This might be ok if all
1130 # The file aliases a local directory. This might be ok if all
1123 # the files in the local directory are being deleted. This
1131 # the files in the local directory are being deleted. This
1124 # will be checked once we know what all the deleted files are.
1132 # will be checked once we know what all the deleted files are.
1125 remoteconflicts.add(f)
1133 remoteconflicts.add(f)
1126 # Track the names of all deleted files.
1134 # Track the names of all deleted files.
1127 if m == ACTION_REMOVE:
1135 if m == ACTION_REMOVE:
1128 deletedfiles.add(f)
1136 deletedfiles.add(f)
1129 if m == ACTION_MERGE:
1137 if m == ACTION_MERGE:
1130 f1, f2, fa, move, anc = args
1138 f1, f2, fa, move, anc = args
1131 if move:
1139 if move:
1132 deletedfiles.add(f1)
1140 deletedfiles.add(f1)
1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1141 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1134 f2, flags = args
1142 f2, flags = args
1135 deletedfiles.add(f2)
1143 deletedfiles.add(f2)
1136
1144
1137 # Check all directories that contain created files for path conflicts.
1145 # Check all directories that contain created files for path conflicts.
1138 for p in createdfiledirs:
1146 for p in createdfiledirs:
1139 if p in mf:
1147 if p in mf:
1140 if p in mctx:
1148 if p in mctx:
1141 # A file is in a directory which aliases both a local
1149 # A file is in a directory which aliases both a local
1142 # and a remote file. This is an internal inconsistency
1150 # and a remote file. This is an internal inconsistency
1143 # within the remote manifest.
1151 # within the remote manifest.
1144 invalidconflicts.add(p)
1152 invalidconflicts.add(p)
1145 else:
1153 else:
1146 # A file is in a directory which aliases a local file.
1154 # A file is in a directory which aliases a local file.
1147 # We will need to rename the local file.
1155 # We will need to rename the local file.
1148 localconflicts.add(p)
1156 localconflicts.add(p)
1149 if p in actions and actions[p][0] in (
1157 if p in actions and actions[p][0] in (
1150 ACTION_CREATED,
1158 ACTION_CREATED,
1151 ACTION_DELETED_CHANGED,
1159 ACTION_DELETED_CHANGED,
1152 ACTION_MERGE,
1160 ACTION_MERGE,
1153 ACTION_CREATED_MERGE,
1161 ACTION_CREATED_MERGE,
1154 ):
1162 ):
1155 # The file is in a directory which aliases a remote file.
1163 # The file is in a directory which aliases a remote file.
1156 # This is an internal inconsistency within the remote
1164 # This is an internal inconsistency within the remote
1157 # manifest.
1165 # manifest.
1158 invalidconflicts.add(p)
1166 invalidconflicts.add(p)
1159
1167
1160 # Rename all local conflicting files that have not been deleted.
1168 # Rename all local conflicting files that have not been deleted.
1161 for p in localconflicts:
1169 for p in localconflicts:
1162 if p not in deletedfiles:
1170 if p not in deletedfiles:
1163 ctxname = bytes(wctx).rstrip(b'+')
1171 ctxname = bytes(wctx).rstrip(b'+')
1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1172 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1165 actions[pnew] = (
1173 actions[pnew] = (
1166 ACTION_PATH_CONFLICT_RESOLVE,
1174 ACTION_PATH_CONFLICT_RESOLVE,
1167 (p,),
1175 (p,),
1168 b'local path conflict',
1176 b'local path conflict',
1169 )
1177 )
1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1178 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1171
1179
1172 if remoteconflicts:
1180 if remoteconflicts:
1173 # Check if all files in the conflicting directories have been removed.
1181 # Check if all files in the conflicting directories have been removed.
1174 ctxname = bytes(mctx).rstrip(b'+')
1182 ctxname = bytes(mctx).rstrip(b'+')
1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1183 for f, p in _filesindirs(repo, mf, remoteconflicts):
1176 if f not in deletedfiles:
1184 if f not in deletedfiles:
1177 m, args, msg = actions[p]
1185 m, args, msg = actions[p]
1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1186 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1187 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1180 # Action was merge, just update target.
1188 # Action was merge, just update target.
1181 actions[pnew] = (m, args, msg)
1189 actions[pnew] = (m, args, msg)
1182 else:
1190 else:
1183 # Action was create, change to renamed get action.
1191 # Action was create, change to renamed get action.
1184 fl = args[0]
1192 fl = args[0]
1185 actions[pnew] = (
1193 actions[pnew] = (
1186 ACTION_LOCAL_DIR_RENAME_GET,
1194 ACTION_LOCAL_DIR_RENAME_GET,
1187 (p, fl),
1195 (p, fl),
1188 b'remote path conflict',
1196 b'remote path conflict',
1189 )
1197 )
1190 actions[p] = (
1198 actions[p] = (
1191 ACTION_PATH_CONFLICT,
1199 ACTION_PATH_CONFLICT,
1192 (pnew, ACTION_REMOVE),
1200 (pnew, ACTION_REMOVE),
1193 b'path conflict',
1201 b'path conflict',
1194 )
1202 )
1195 remoteconflicts.remove(p)
1203 remoteconflicts.remove(p)
1196 break
1204 break
1197
1205
1198 if invalidconflicts:
1206 if invalidconflicts:
1199 for p in invalidconflicts:
1207 for p in invalidconflicts:
1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1208 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1209 raise error.Abort(_(b"destination manifest contains path conflicts"))
1202
1210
1203
1211
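# A stripped-down illustration of the "local conflict" case handled above: a
# file created by the merge conflicts with an existing local file that sits on
# one of its parent directories. Plain sets stand in for manifests and the
# actions dict; the names below are assumptions made for the example.
def _localpathconflicts(created, local_files):
    """Return local files shadowed by a parent directory of a created file.

    >>> sorted(_localpathconflicts({b'a/b/c'}, {b'a', b'x'}))
    [b'a']
    """
    conflicts = set()
    for f in created:
        parts = f.split(b'/')
        for i in range(1, len(parts)):
            d = b'/'.join(parts[:i])
            if d in local_files:
                conflicts.add(d)
    return conflicts
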
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1212 def _filternarrowactions(narrowmatch, branchmerge, actions):
1205 """
1213 """
1206 Filters out actions that can be ignored because the repo is narrowed.
1214 Filters out actions that can be ignored because the repo is narrowed.
1207
1215
1208 Raise an exception if the merge cannot be completed because the repo is
1216 Raise an exception if the merge cannot be completed because the repo is
1209 narrowed.
1217 narrowed.
1210 """
1218 """
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1219 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1220 nonconflicttypes = set(b'a am c cm f g r e'.split())
1213 # We mutate the items in the dict during iteration, so iterate
1221 # We mutate the items in the dict during iteration, so iterate
1214 # over a copy.
1222 # over a copy.
1215 for f, action in list(actions.items()):
1223 for f, action in list(actions.items()):
1216 if narrowmatch(f):
1224 if narrowmatch(f):
1217 pass
1225 pass
1218 elif not branchmerge:
1226 elif not branchmerge:
1219 del actions[f] # just updating, ignore changes outside clone
1227 del actions[f] # just updating, ignore changes outside clone
1220 elif action[0] in nooptypes:
1228 elif action[0] in nooptypes:
1221 del actions[f] # merge does not affect file
1229 del actions[f] # merge does not affect file
1222 elif action[0] in nonconflicttypes:
1230 elif action[0] in nonconflicttypes:
1223 raise error.Abort(
1231 raise error.Abort(
1224 _(
1232 _(
1225 b'merge affects file \'%s\' outside narrow, '
1233 b'merge affects file \'%s\' outside narrow, '
1226 b'which is not yet supported'
1234 b'which is not yet supported'
1227 )
1235 )
1228 % f,
1236 % f,
1229 hint=_(b'merging in the other direction may work'),
1237 hint=_(b'merging in the other direction may work'),
1230 )
1238 )
1231 else:
1239 else:
1232 raise error.Abort(
1240 raise error.Abort(
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1241 _(b'conflict in file \'%s\' is outside narrow clone') % f
1234 )
1242 )
1235
1243
1236
1244
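# The three possible outcomes above (keep, drop, abort) restated over a plain
# dict, so the control flow is easy to follow. The matcher is modelled as a
# simple predicate and ValueError stands in for error.Abort; this is a sketch,
# not the module's API.
def _filternarrow_sketch(
    inside_narrow,
    branchmerge,
    actions,
    nooptypes=frozenset((b'k',)),
    nonconflicttypes=frozenset(b'a am c cm f g r e'.split()),
):
    """
    >>> acts = {b'in': (b'g', None, b''), b'out': (b'k', None, b'')}
    >>> _filternarrow_sketch(lambda f: f == b'in', True, acts)
    >>> sorted(acts)
    [b'in']
    """
    for f, (m, args, msg) in list(actions.items()):
        if inside_narrow(f):
            continue
        if not branchmerge or m in nooptypes:
            del actions[f]  # plain update or no-op action: safe to drop
        elif m in nonconflicttypes:
            raise ValueError(b'merge affects file outside narrow: %s' % f)
        else:
            raise ValueError(b'conflict outside narrow clone: %s' % f)
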
1237 def manifestmerge(
1245 def manifestmerge(
1238 repo,
1246 repo,
1239 wctx,
1247 wctx,
1240 p2,
1248 p2,
1241 pa,
1249 pa,
1242 branchmerge,
1250 branchmerge,
1243 force,
1251 force,
1244 matcher,
1252 matcher,
1245 acceptremote,
1253 acceptremote,
1246 followcopies,
1254 followcopies,
1247 forcefulldiff=False,
1255 forcefulldiff=False,
1248 ):
1256 ):
1249 """
1257 """
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1258 Merge wctx and p2 with ancestor pa and generate merge action list
1251
1259
1252 branchmerge and force are as passed in to update
1260 branchmerge and force are as passed in to update
1253 matcher = matcher to filter file lists
1261 matcher = matcher to filter file lists
1254 acceptremote = accept the incoming changes without prompting
1262 acceptremote = accept the incoming changes without prompting
1255 """
1263 """
1256 if matcher is not None and matcher.always():
1264 if matcher is not None and matcher.always():
1257 matcher = None
1265 matcher = None
1258
1266
1259 # manifests fetched in order are going to be faster, so prime the caches
1267 # manifests fetched in order are going to be faster, so prime the caches
1260 [
1268 [
1261 x.manifest()
1269 x.manifest()
1262 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1270 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1263 ]
1271 ]
1264
1272
1265 branch_copies1 = copies.branch_copies()
1273 branch_copies1 = copies.branch_copies()
1266 branch_copies2 = copies.branch_copies()
1274 branch_copies2 = copies.branch_copies()
1267 diverge = {}
1275 diverge = {}
1268 if followcopies:
1276 if followcopies:
1269 branch_copies1, branch_copies2, diverge = copies.mergecopies(
1277 branch_copies1, branch_copies2, diverge = copies.mergecopies(
1270 repo, wctx, p2, pa
1278 repo, wctx, p2, pa
1271 )
1279 )
1272
1280
1273 boolbm = pycompat.bytestr(bool(branchmerge))
1281 boolbm = pycompat.bytestr(bool(branchmerge))
1274 boolf = pycompat.bytestr(bool(force))
1282 boolf = pycompat.bytestr(bool(force))
1275 boolm = pycompat.bytestr(bool(matcher))
1283 boolm = pycompat.bytestr(bool(matcher))
1276 repo.ui.note(_(b"resolving manifests\n"))
1284 repo.ui.note(_(b"resolving manifests\n"))
1277 repo.ui.debug(
1285 repo.ui.debug(
1278 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1286 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1279 )
1287 )
1280 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1288 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1281
1289
1282 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1290 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1283 copied1 = set(branch_copies1.copy.values())
1291 copied1 = set(branch_copies1.copy.values())
1284 copied1.update(branch_copies1.movewithdir.values())
1292 copied1.update(branch_copies1.movewithdir.values())
1285 copied2 = set(branch_copies2.copy.values())
1293 copied2 = set(branch_copies2.copy.values())
1286 copied2.update(branch_copies2.movewithdir.values())
1294 copied2.update(branch_copies2.movewithdir.values())
1287
1295
1288 if b'.hgsubstate' in m1 and wctx.rev() is None:
1296 if b'.hgsubstate' in m1 and wctx.rev() is None:
1289 # Check whether sub state is modified, and overwrite the manifest
1297 # Check whether sub state is modified, and overwrite the manifest
1290 # to flag the change. If wctx is a committed revision, we shouldn't
1298 # to flag the change. If wctx is a committed revision, we shouldn't
1291 # care for the dirty state of the working directory.
1299 # care for the dirty state of the working directory.
1292 if any(wctx.sub(s).dirty() for s in wctx.substate):
1300 if any(wctx.sub(s).dirty() for s in wctx.substate):
1293 m1[b'.hgsubstate'] = modifiednodeid
1301 m1[b'.hgsubstate'] = modifiednodeid
1294
1302
1295 # Don't use m2-vs-ma optimization if:
1303 # Don't use m2-vs-ma optimization if:
1296 # - ma is the same as m1 or m2, which we're just going to diff again later
1304 # - ma is the same as m1 or m2, which we're just going to diff again later
1297 # - The caller specifically asks for a full diff, which is useful during bid
1305 # - The caller specifically asks for a full diff, which is useful during bid
1298 # merge.
1306 # merge.
1299 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1307 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1300 # Identify which files are relevant to the merge, so we can limit the
1308 # Identify which files are relevant to the merge, so we can limit the
1301 # total m1-vs-m2 diff to just those files. This has significant
1309 # total m1-vs-m2 diff to just those files. This has significant
1302 # performance benefits in large repositories.
1310 # performance benefits in large repositories.
1303 relevantfiles = set(ma.diff(m2).keys())
1311 relevantfiles = set(ma.diff(m2).keys())
1304
1312
1305 # For copied and moved files, we need to add the source file too.
1313 # For copied and moved files, we need to add the source file too.
1306 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
1314 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
1307 if copyvalue in relevantfiles:
1315 if copyvalue in relevantfiles:
1308 relevantfiles.add(copykey)
1316 relevantfiles.add(copykey)
1309 for movedirkey in branch_copies1.movewithdir:
1317 for movedirkey in branch_copies1.movewithdir:
1310 relevantfiles.add(movedirkey)
1318 relevantfiles.add(movedirkey)
1311 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1319 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1312 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1320 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1313
1321
1314 diff = m1.diff(m2, match=matcher)
1322 diff = m1.diff(m2, match=matcher)
1315
1323
1316 actions = {}
1324 actions = {}
1317 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1325 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1318 if n1 and n2: # file exists on both local and remote side
1326 if n1 and n2: # file exists on both local and remote side
1319 if f not in ma:
1327 if f not in ma:
1320 # TODO: what if they're renamed from different sources?
1328 # TODO: what if they're renamed from different sources?
1321 fa = branch_copies1.copy.get(
1329 fa = branch_copies1.copy.get(
1322 f, None
1330 f, None
1323 ) or branch_copies2.copy.get(f, None)
1331 ) or branch_copies2.copy.get(f, None)
1324 if fa is not None:
1332 if fa is not None:
1325 actions[f] = (
1333 actions[f] = (
1326 ACTION_MERGE,
1334 ACTION_MERGE,
1327 (f, f, fa, False, pa.node()),
1335 (f, f, fa, False, pa.node()),
1328 b'both renamed from %s' % fa,
1336 b'both renamed from %s' % fa,
1329 )
1337 )
1330 else:
1338 else:
1331 actions[f] = (
1339 actions[f] = (
1332 ACTION_MERGE,
1340 ACTION_MERGE,
1333 (f, f, None, False, pa.node()),
1341 (f, f, None, False, pa.node()),
1334 b'both created',
1342 b'both created',
1335 )
1343 )
1336 else:
1344 else:
1337 a = ma[f]
1345 a = ma[f]
1338 fla = ma.flags(f)
1346 fla = ma.flags(f)
1339 nol = b'l' not in fl1 + fl2 + fla
1347 nol = b'l' not in fl1 + fl2 + fla
1340 if n2 == a and fl2 == fla:
1348 if n2 == a and fl2 == fla:
1341 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1349 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1342 elif n1 == a and fl1 == fla: # local unchanged - use remote
1350 elif n1 == a and fl1 == fla: # local unchanged - use remote
1343 if n1 == n2: # optimization: keep local content
1351 if n1 == n2: # optimization: keep local content
1344 actions[f] = (
1352 actions[f] = (
1345 ACTION_EXEC,
1353 ACTION_EXEC,
1346 (fl2,),
1354 (fl2,),
1347 b'update permissions',
1355 b'update permissions',
1348 )
1356 )
1349 else:
1357 else:
1350 actions[f] = (
1358 actions[f] = (
1351 ACTION_GET,
1359 ACTION_GET,
1352 (fl2, False),
1360 (fl2, False),
1353 b'remote is newer',
1361 b'remote is newer',
1354 )
1362 )
1355 elif nol and n2 == a: # remote only changed 'x'
1363 elif nol and n2 == a: # remote only changed 'x'
1356 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1364 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1357 elif nol and n1 == a: # local only changed 'x'
1365 elif nol and n1 == a: # local only changed 'x'
1358 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1366 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1359 else: # both changed something
1367 else: # both changed something
1360 actions[f] = (
1368 actions[f] = (
1361 ACTION_MERGE,
1369 ACTION_MERGE,
1362 (f, f, f, False, pa.node()),
1370 (f, f, f, False, pa.node()),
1363 b'versions differ',
1371 b'versions differ',
1364 )
1372 )
1365 elif n1: # file exists only on local side
1373 elif n1: # file exists only on local side
1366 if f in copied2:
1374 if f in copied2:
1367 pass # we'll deal with it on m2 side
1375 pass # we'll deal with it on m2 side
1368 elif (
1376 elif (
1369 f in branch_copies1.movewithdir
1377 f in branch_copies1.movewithdir
1370 ): # directory rename, move local
1378 ): # directory rename, move local
1371 f2 = branch_copies1.movewithdir[f]
1379 f2 = branch_copies1.movewithdir[f]
1372 if f2 in m2:
1380 if f2 in m2:
1373 actions[f2] = (
1381 actions[f2] = (
1374 ACTION_MERGE,
1382 ACTION_MERGE,
1375 (f, f2, None, True, pa.node()),
1383 (f, f2, None, True, pa.node()),
1376 b'remote directory rename, both created',
1384 b'remote directory rename, both created',
1377 )
1385 )
1378 else:
1386 else:
1379 actions[f2] = (
1387 actions[f2] = (
1380 ACTION_DIR_RENAME_MOVE_LOCAL,
1388 ACTION_DIR_RENAME_MOVE_LOCAL,
1381 (f, fl1),
1389 (f, fl1),
1382 b'remote directory rename - move from %s' % f,
1390 b'remote directory rename - move from %s' % f,
1383 )
1391 )
1384 elif f in branch_copies1.copy:
1392 elif f in branch_copies1.copy:
1385 f2 = branch_copies1.copy[f]
1393 f2 = branch_copies1.copy[f]
1386 actions[f] = (
1394 actions[f] = (
1387 ACTION_MERGE,
1395 ACTION_MERGE,
1388 (f, f2, f2, False, pa.node()),
1396 (f, f2, f2, False, pa.node()),
1389 b'local copied/moved from %s' % f2,
1397 b'local copied/moved from %s' % f2,
1390 )
1398 )
1391 elif f in ma: # clean, a different, no remote
1399 elif f in ma: # clean, a different, no remote
1392 if n1 != ma[f]:
1400 if n1 != ma[f]:
1393 if acceptremote:
1401 if acceptremote:
1394 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1402 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1395 else:
1403 else:
1396 actions[f] = (
1404 actions[f] = (
1397 ACTION_CHANGED_DELETED,
1405 ACTION_CHANGED_DELETED,
1398 (f, None, f, False, pa.node()),
1406 (f, None, f, False, pa.node()),
1399 b'prompt changed/deleted',
1407 b'prompt changed/deleted',
1400 )
1408 )
1401 elif n1 == addednodeid:
1409 elif n1 == addednodeid:
1402 # This extra 'a' is added by working copy manifest to mark
1410 # This extra 'a' is added by working copy manifest to mark
1403 # the file as locally added. We should forget it instead of
1411 # the file as locally added. We should forget it instead of
1404 # deleting it.
1412 # deleting it.
1405 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1413 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1406 else:
1414 else:
1407 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1415 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1408 elif n2: # file exists only on remote side
1416 elif n2: # file exists only on remote side
1409 if f in copied1:
1417 if f in copied1:
1410 pass # we'll deal with it on m1 side
1418 pass # we'll deal with it on m1 side
1411 elif f in branch_copies2.movewithdir:
1419 elif f in branch_copies2.movewithdir:
1412 f2 = branch_copies2.movewithdir[f]
1420 f2 = branch_copies2.movewithdir[f]
1413 if f2 in m1:
1421 if f2 in m1:
1414 actions[f2] = (
1422 actions[f2] = (
1415 ACTION_MERGE,
1423 ACTION_MERGE,
1416 (f2, f, None, False, pa.node()),
1424 (f2, f, None, False, pa.node()),
1417 b'local directory rename, both created',
1425 b'local directory rename, both created',
1418 )
1426 )
1419 else:
1427 else:
1420 actions[f2] = (
1428 actions[f2] = (
1421 ACTION_LOCAL_DIR_RENAME_GET,
1429 ACTION_LOCAL_DIR_RENAME_GET,
1422 (f, fl2),
1430 (f, fl2),
1423 b'local directory rename - get from %s' % f,
1431 b'local directory rename - get from %s' % f,
1424 )
1432 )
1425 elif f in branch_copies2.copy:
1433 elif f in branch_copies2.copy:
1426 f2 = branch_copies2.copy[f]
1434 f2 = branch_copies2.copy[f]
1427 if f2 in m2:
1435 if f2 in m2:
1428 actions[f] = (
1436 actions[f] = (
1429 ACTION_MERGE,
1437 ACTION_MERGE,
1430 (f2, f, f2, False, pa.node()),
1438 (f2, f, f2, False, pa.node()),
1431 b'remote copied from %s' % f2,
1439 b'remote copied from %s' % f2,
1432 )
1440 )
1433 else:
1441 else:
1434 actions[f] = (
1442 actions[f] = (
1435 ACTION_MERGE,
1443 ACTION_MERGE,
1436 (f2, f, f2, True, pa.node()),
1444 (f2, f, f2, True, pa.node()),
1437 b'remote moved from %s' % f2,
1445 b'remote moved from %s' % f2,
1438 )
1446 )
1439 elif f not in ma:
1447 elif f not in ma:
1440 # local unknown, remote created: the logic is described by the
1448 # local unknown, remote created: the logic is described by the
1441 # following table:
1449 # following table:
1442 #
1450 #
1443 # force branchmerge different | action
1451 # force branchmerge different | action
1444 # n * * | create
1452 # n * * | create
1445 # y n * | create
1453 # y n * | create
1446 # y y n | create
1454 # y y n | create
1447 # y y y | merge
1455 # y y y | merge
1448 #
1456 #
1449 # Checking whether the files are different is expensive, so we
1457 # Checking whether the files are different is expensive, so we
1450 # don't do that when we can avoid it.
1458 # don't do that when we can avoid it.
1451 if not force:
1459 if not force:
1452 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1460 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1453 elif not branchmerge:
1461 elif not branchmerge:
1454 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1462 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1455 else:
1463 else:
1456 actions[f] = (
1464 actions[f] = (
1457 ACTION_CREATED_MERGE,
1465 ACTION_CREATED_MERGE,
1458 (fl2, pa.node()),
1466 (fl2, pa.node()),
1459 b'remote created, get or merge',
1467 b'remote created, get or merge',
1460 )
1468 )
1461 elif n2 != ma[f]:
1469 elif n2 != ma[f]:
1462 df = None
1470 df = None
1463 for d in branch_copies1.dirmove:
1471 for d in branch_copies1.dirmove:
1464 if f.startswith(d):
1472 if f.startswith(d):
1465 # new file added in a directory that was moved
1473 # new file added in a directory that was moved
1466 df = branch_copies1.dirmove[d] + f[len(d) :]
1474 df = branch_copies1.dirmove[d] + f[len(d) :]
1467 break
1475 break
1468 if df is not None and df in m1:
1476 if df is not None and df in m1:
1469 actions[df] = (
1477 actions[df] = (
1470 ACTION_MERGE,
1478 ACTION_MERGE,
1471 (df, f, f, False, pa.node()),
1479 (df, f, f, False, pa.node()),
1472 b'local directory rename - respect move '
1480 b'local directory rename - respect move '
1473 b'from %s' % f,
1481 b'from %s' % f,
1474 )
1482 )
1475 elif acceptremote:
1483 elif acceptremote:
1476 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1484 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1477 else:
1485 else:
1478 actions[f] = (
1486 actions[f] = (
1479 ACTION_DELETED_CHANGED,
1487 ACTION_DELETED_CHANGED,
1480 (None, f, f, False, pa.node()),
1488 (None, f, f, False, pa.node()),
1481 b'prompt deleted/changed',
1489 b'prompt deleted/changed',
1482 )
1490 )
1483
1491
1484 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1492 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1485 # If we are merging, look for path conflicts.
1493 # If we are merging, look for path conflicts.
1486 checkpathconflicts(repo, wctx, p2, actions)
1494 checkpathconflicts(repo, wctx, p2, actions)
1487
1495
1488 narrowmatch = repo.narrowmatch()
1496 narrowmatch = repo.narrowmatch()
1489 if not narrowmatch.always():
1497 if not narrowmatch.always():
1490 # Updates "actions" in place
1498 # Updates "actions" in place
1491 _filternarrowactions(narrowmatch, branchmerge, actions)
1499 _filternarrowactions(narrowmatch, branchmerge, actions)
1492
1500
1493 renamedelete = branch_copies1.renamedelete
1501 renamedelete = branch_copies1.renamedelete
1494 renamedelete.update(branch_copies2.renamedelete)
1502 renamedelete.update(branch_copies2.renamedelete)
1495
1503
1496 return actions, diverge, renamedelete
1504 return actions, diverge, renamedelete
1497
1505
1498
1506
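# The heart of the per-file logic in manifestmerge() is a three-way comparison
# of the local, remote and ancestor nodeids. A heavily simplified sketch of
# the "file exists on both sides and in the ancestor" branch; flags, copies
# and renames are deliberately ignored, and the returned strings are the
# descriptive messages used above rather than real action constants.
def _threeway_sketch(local, remote, ancestor):
    """
    >>> _threeway_sketch(b'n1', b'n0', b'n0')
    'remote unchanged - keep'
    >>> _threeway_sketch(b'n0', b'n2', b'n0')
    'local unchanged - get remote'
    >>> _threeway_sketch(b'n1', b'n2', b'n0')
    'versions differ - merge'
    """
    if remote == ancestor:
        return 'remote unchanged - keep'
    if local == ancestor:
        return 'local unchanged - get remote'
    return 'versions differ - merge'
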
1499 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1507 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1500 """Resolves false conflicts where the nodeid changed but the content
1508 """Resolves false conflicts where the nodeid changed but the content
1501 remained the same."""
1509 remained the same."""
1502 # We force a copy of actions.items() because we're going to mutate
1510 # We force a copy of actions.items() because we're going to mutate
1503 # actions as we resolve trivial conflicts.
1511 # actions as we resolve trivial conflicts.
1504 for f, (m, args, msg) in list(actions.items()):
1512 for f, (m, args, msg) in list(actions.items()):
1505 if (
1513 if (
1506 m == ACTION_CHANGED_DELETED
1514 m == ACTION_CHANGED_DELETED
1507 and f in ancestor
1515 and f in ancestor
1508 and not wctx[f].cmp(ancestor[f])
1516 and not wctx[f].cmp(ancestor[f])
1509 ):
1517 ):
1510 # local did change but ended up with same content
1518 # local did change but ended up with same content
1511 actions[f] = ACTION_REMOVE, None, b'prompt same'
1519 actions[f] = ACTION_REMOVE, None, b'prompt same'
1512 elif (
1520 elif (
1513 m == ACTION_DELETED_CHANGED
1521 m == ACTION_DELETED_CHANGED
1514 and f in ancestor
1522 and f in ancestor
1515 and not mctx[f].cmp(ancestor[f])
1523 and not mctx[f].cmp(ancestor[f])
1516 ):
1524 ):
1517 # remote did change but ended up with same content
1525 # remote did change but ended up with same content
1518 del actions[f] # don't get = keep local deleted
1526 del actions[f] # don't get = keep local deleted
1519
1527
1520
1528
1521 def calculateupdates(
1529 def calculateupdates(
1522 repo,
1530 repo,
1523 wctx,
1531 wctx,
1524 mctx,
1532 mctx,
1525 ancestors,
1533 ancestors,
1526 branchmerge,
1534 branchmerge,
1527 force,
1535 force,
1528 acceptremote,
1536 acceptremote,
1529 followcopies,
1537 followcopies,
1530 matcher=None,
1538 matcher=None,
1531 mergeforce=False,
1539 mergeforce=False,
1532 ):
1540 ):
1533 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1541 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1534 # Avoid cycle.
1542 # Avoid cycle.
1535 from . import sparse
1543 from . import sparse
1536
1544
1537 if len(ancestors) == 1: # default
1545 if len(ancestors) == 1: # default
1538 actions, diverge, renamedelete = manifestmerge(
1546 actions, diverge, renamedelete = manifestmerge(
1539 repo,
1547 repo,
1540 wctx,
1548 wctx,
1541 mctx,
1549 mctx,
1542 ancestors[0],
1550 ancestors[0],
1543 branchmerge,
1551 branchmerge,
1544 force,
1552 force,
1545 matcher,
1553 matcher,
1546 acceptremote,
1554 acceptremote,
1547 followcopies,
1555 followcopies,
1548 )
1556 )
1549 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1557 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1550
1558
1551 else: # only when merge.preferancestor=* - the default
1559 else: # only when merge.preferancestor=* - the default
1552 repo.ui.note(
1560 repo.ui.note(
1553 _(b"note: merging %s and %s using bids from ancestors %s\n")
1561 _(b"note: merging %s and %s using bids from ancestors %s\n")
1554 % (
1562 % (
1555 wctx,
1563 wctx,
1556 mctx,
1564 mctx,
1557 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1565 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1558 )
1566 )
1559 )
1567 )
1560
1568
1561 # Call for bids
1569 # Call for bids
1562 fbids = (
1570 fbids = (
1563 {}
1571 {}
1564 ) # mapping filename to bids (action method to list of actions)
1572 ) # mapping filename to bids (action method to list of actions)
1565 diverge, renamedelete = None, None
1573 diverge, renamedelete = None, None
1566 for ancestor in ancestors:
1574 for ancestor in ancestors:
1567 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1575 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1568 actions, diverge1, renamedelete1 = manifestmerge(
1576 actions, diverge1, renamedelete1 = manifestmerge(
1569 repo,
1577 repo,
1570 wctx,
1578 wctx,
1571 mctx,
1579 mctx,
1572 ancestor,
1580 ancestor,
1573 branchmerge,
1581 branchmerge,
1574 force,
1582 force,
1575 matcher,
1583 matcher,
1576 acceptremote,
1584 acceptremote,
1577 followcopies,
1585 followcopies,
1578 forcefulldiff=True,
1586 forcefulldiff=True,
1579 )
1587 )
1580 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1588 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1581
1589
1582 # Track the shortest set of warnings on the theory that bid
1590 # Track the shortest set of warnings on the theory that bid
1583 # merge will correctly incorporate more information
1591 # merge will correctly incorporate more information
1584 if diverge is None or len(diverge1) < len(diverge):
1592 if diverge is None or len(diverge1) < len(diverge):
1585 diverge = diverge1
1593 diverge = diverge1
1586 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1594 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1587 renamedelete = renamedelete1
1595 renamedelete = renamedelete1
1588
1596
1589 for f, a in sorted(pycompat.iteritems(actions)):
1597 for f, a in sorted(pycompat.iteritems(actions)):
1590 m, args, msg = a
1598 m, args, msg = a
1591 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1599 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1592 if f in fbids:
1600 if f in fbids:
1593 d = fbids[f]
1601 d = fbids[f]
1594 if m in d:
1602 if m in d:
1595 d[m].append(a)
1603 d[m].append(a)
1596 else:
1604 else:
1597 d[m] = [a]
1605 d[m] = [a]
1598 else:
1606 else:
1599 fbids[f] = {m: [a]}
1607 fbids[f] = {m: [a]}
1600
1608
1601 # Pick the best bid for each file
1609 # Pick the best bid for each file
1602 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1610 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1603 actions = {}
1611 actions = {}
1604 for f, bids in sorted(fbids.items()):
1612 for f, bids in sorted(fbids.items()):
1605 # bids is a mapping from action method to list of actions
1613 # bids is a mapping from action method to list of actions
1606 # Consensus?
1614 # Consensus?
1607 if len(bids) == 1: # all bids are the same kind of method
1615 if len(bids) == 1: # all bids are the same kind of method
1608 m, l = list(bids.items())[0]
1616 m, l = list(bids.items())[0]
1609 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1617 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1610 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1618 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1611 actions[f] = l[0]
1619 actions[f] = l[0]
1612 continue
1620 continue
1613 # If keep is an option, just do it.
1621 # If keep is an option, just do it.
1614 if ACTION_KEEP in bids:
1622 if ACTION_KEEP in bids:
1615 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1623 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1616 actions[f] = bids[ACTION_KEEP][0]
1624 actions[f] = bids[ACTION_KEEP][0]
1617 continue
1625 continue
1618 # If there are gets and they all agree [how could they not?], do it.
1626 # If there are gets and they all agree [how could they not?], do it.
1619 if ACTION_GET in bids:
1627 if ACTION_GET in bids:
1620 ga0 = bids[ACTION_GET][0]
1628 ga0 = bids[ACTION_GET][0]
1621 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1629 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1622 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1630 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1623 actions[f] = ga0
1631 actions[f] = ga0
1624 continue
1632 continue
1625 # TODO: Consider other simple actions such as mode changes
1633 # TODO: Consider other simple actions such as mode changes
1626 # Handle inefficient democracy.
1634 # Handle inefficient democracy.
1627 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1635 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1628 for m, l in sorted(bids.items()):
1636 for m, l in sorted(bids.items()):
1629 for _f, args, msg in l:
1637 for _f, args, msg in l:
1630 repo.ui.note(b' %s -> %s\n' % (msg, m))
1638 repo.ui.note(b' %s -> %s\n' % (msg, m))
1631 # Pick random action. TODO: Instead, prompt user when resolving
1639 # Pick random action. TODO: Instead, prompt user when resolving
1632 m, l = list(bids.items())[0]
1640 m, l = list(bids.items())[0]
1633 repo.ui.warn(
1641 repo.ui.warn(
1634 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1642 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1635 )
1643 )
1636 actions[f] = l[0]
1644 actions[f] = l[0]
1637 continue
1645 continue
1638 repo.ui.note(_(b'end of auction\n\n'))
1646 repo.ui.note(_(b'end of auction\n\n'))
1639
1647
1640 if wctx.rev() is None:
1648 if wctx.rev() is None:
1641 fractions = _forgetremoved(wctx, mctx, branchmerge)
1649 fractions = _forgetremoved(wctx, mctx, branchmerge)
1642 actions.update(fractions)
1650 actions.update(fractions)
1643
1651
1644 prunedactions = sparse.filterupdatesactions(
1652 prunedactions = sparse.filterupdatesactions(
1645 repo, wctx, mctx, branchmerge, actions
1653 repo, wctx, mctx, branchmerge, actions
1646 )
1654 )
1647 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1655 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1648
1656
1649 return prunedactions, diverge, renamedelete
1657 return prunedactions, diverge, renamedelete
1650
1658
1651
1659
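# The bid "auction" above picks, per file, among actions proposed by different
# ancestors: unanimity wins, otherwise 'keep' wins, otherwise a 'get' bid
# wins, otherwise the first bid is taken arbitrarily. A compact sketch over a
# {filename: [action, ...]} mapping, using single-letter strings instead of
# real action tuples (all names here are assumptions for the example).
def _auction_sketch(bids_per_file):
    """
    >>> _auction_sketch({b'a': ['g', 'g'], b'b': ['m', 'k'],
    ...                  b'c': ['m', 'g'], b'd': ['m', 'cd']})
    {b'a': 'g', b'b': 'k', b'c': 'g', b'd': 'm'}
    """
    result = {}
    for f, bids in sorted(bids_per_file.items()):
        if all(b == bids[0] for b in bids):
            result[f] = bids[0]  # consensus among all ancestors
        elif 'k' in bids:
            result[f] = 'k'  # keeping the local file is always an option
        elif 'g' in bids:
            result[f] = 'g'  # 'get' bids are identical in this simplified model
        else:
            result[f] = bids[0]  # ambiguous: arbitrarily pick the first bid
    return result
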
1652 def _getcwd():
1660 def _getcwd():
1653 try:
1661 try:
1654 return encoding.getcwd()
1662 return encoding.getcwd()
1655 except OSError as err:
1663 except OSError as err:
1656 if err.errno == errno.ENOENT:
1664 if err.errno == errno.ENOENT:
1657 return None
1665 return None
1658 raise
1666 raise
1659
1667
1660
1668
1661 def batchremove(repo, wctx, actions):
1669 def batchremove(repo, wctx, actions):
1662 """apply removes to the working directory
1670 """apply removes to the working directory
1663
1671
1664 yields tuples for progress updates
1672 yields tuples for progress updates
1665 """
1673 """
1666 verbose = repo.ui.verbose
1674 verbose = repo.ui.verbose
1667 cwd = _getcwd()
1675 cwd = _getcwd()
1668 i = 0
1676 i = 0
1669 for f, args, msg in actions:
1677 for f, args, msg in actions:
1670 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1678 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1671 if verbose:
1679 if verbose:
1672 repo.ui.note(_(b"removing %s\n") % f)
1680 repo.ui.note(_(b"removing %s\n") % f)
1673 wctx[f].audit()
1681 wctx[f].audit()
1674 try:
1682 try:
1675 wctx[f].remove(ignoremissing=True)
1683 wctx[f].remove(ignoremissing=True)
1676 except OSError as inst:
1684 except OSError as inst:
1677 repo.ui.warn(
1685 repo.ui.warn(
1678 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1686 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1679 )
1687 )
1680 if i == 100:
1688 if i == 100:
1681 yield i, f
1689 yield i, f
1682 i = 0
1690 i = 0
1683 i += 1
1691 i += 1
1684 if i > 0:
1692 if i > 0:
1685 yield i, f
1693 yield i, f
1686
1694
1687 if cwd and not _getcwd():
1695 if cwd and not _getcwd():
1688 # cwd was removed in the course of removing files; print a helpful
1696 # cwd was removed in the course of removing files; print a helpful
1689 # warning.
1697 # warning.
1690 repo.ui.warn(
1698 repo.ui.warn(
1691 _(
1699 _(
1692 b"current directory was removed\n"
1700 b"current directory was removed\n"
1693 b"(consider changing to repo root: %s)\n"
1701 b"(consider changing to repo root: %s)\n"
1694 )
1702 )
1695 % repo.root
1703 % repo.root
1696 )
1704 )
1697
1705
1698
1706
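# batchremove() above and batchget() below report progress with the same
# counter pattern: yield roughly every 100 files, then flush the remainder.
# Isolated for clarity (process() is an assumed stand-in for the per-file
# work; this is illustrative, not part of the module's API):
def _progress_batches(files, process):
    """
    >>> list(_progress_batches([b'a', b'b', b'c'], lambda f: None))
    [(3, b'c')]
    """
    i = 0
    for f in files:
        process(f)
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f  # flush the final, possibly short, batch
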
1699 def batchget(repo, mctx, wctx, wantfiledata, actions):
1707 def batchget(repo, mctx, wctx, wantfiledata, actions):
1700 """apply gets to the working directory
1708 """apply gets to the working directory
1701
1709
1702 mctx is the context to get from
1710 mctx is the context to get from
1703
1711
1704 Yields arbitrarily many (False, tuple) for progress updates, followed by
1712 Yields arbitrarily many (False, tuple) for progress updates, followed by
1705 exactly one (True, filedata). When wantfiledata is false, filedata is an
1713 exactly one (True, filedata). When wantfiledata is false, filedata is an
1706 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1714 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1707 mtime) of the file f written for each action.
1715 mtime) of the file f written for each action.
1708 """
1716 """
1709 filedata = {}
1717 filedata = {}
1710 verbose = repo.ui.verbose
1718 verbose = repo.ui.verbose
1711 fctx = mctx.filectx
1719 fctx = mctx.filectx
1712 ui = repo.ui
1720 ui = repo.ui
1713 i = 0
1721 i = 0
1714 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1722 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1715 for f, (flags, backup), msg in actions:
1723 for f, (flags, backup), msg in actions:
1716 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1724 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1717 if verbose:
1725 if verbose:
1718 repo.ui.note(_(b"getting %s\n") % f)
1726 repo.ui.note(_(b"getting %s\n") % f)
1719
1727
1720 if backup:
1728 if backup:
1721 # If a file or directory exists with the same name, back that
1729 # If a file or directory exists with the same name, back that
1722 # up. Otherwise, look to see if there is a file that conflicts
1730 # up. Otherwise, look to see if there is a file that conflicts
1723 # with a directory this file is in, and if so, back that up.
1731 # with a directory this file is in, and if so, back that up.
1724 conflicting = f
1732 conflicting = f
1725 if not repo.wvfs.lexists(f):
1733 if not repo.wvfs.lexists(f):
1726 for p in pathutil.finddirs(f):
1734 for p in pathutil.finddirs(f):
1727 if repo.wvfs.isfileorlink(p):
1735 if repo.wvfs.isfileorlink(p):
1728 conflicting = p
1736 conflicting = p
1729 break
1737 break
1730 if repo.wvfs.lexists(conflicting):
1738 if repo.wvfs.lexists(conflicting):
1731 orig = scmutil.backuppath(ui, repo, conflicting)
1739 orig = scmutil.backuppath(ui, repo, conflicting)
1732 util.rename(repo.wjoin(conflicting), orig)
1740 util.rename(repo.wjoin(conflicting), orig)
1733 wfctx = wctx[f]
1741 wfctx = wctx[f]
1734 wfctx.clearunknown()
1742 wfctx.clearunknown()
1735 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1743 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1736 size = wfctx.write(
1744 size = wfctx.write(
1737 fctx(f).data(),
1745 fctx(f).data(),
1738 flags,
1746 flags,
1739 backgroundclose=True,
1747 backgroundclose=True,
1740 atomictemp=atomictemp,
1748 atomictemp=atomictemp,
1741 )
1749 )
1742 if wantfiledata:
1750 if wantfiledata:
1743 s = wfctx.lstat()
1751 s = wfctx.lstat()
1744 mode = s.st_mode
1752 mode = s.st_mode
1745 mtime = s[stat.ST_MTIME]
1753 mtime = s[stat.ST_MTIME]
1746 filedata[f] = (mode, size, mtime) # for dirstate.normal
1754 filedata[f] = (mode, size, mtime) # for dirstate.normal
1747 if i == 100:
1755 if i == 100:
1748 yield False, (i, f)
1756 yield False, (i, f)
1749 i = 0
1757 i = 0
1750 i += 1
1758 i += 1
1751 if i > 0:
1759 if i > 0:
1752 yield False, (i, f)
1760 yield False, (i, f)
1753 yield True, filedata
1761 yield True, filedata
1754
1762
1755
1763
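# The (False, progress) / (True, filedata) protocol documented above is
# consumed by draining the generator and keeping only the final payload. A
# minimal consumer sketch; report() is an assumed progress callback, not a
# real API in this module.
def _drain_batchget(gen, report=lambda count, name: None):
    """
    >>> def fake():
    ...     yield False, (2, b'b')
    ...     yield True, {b'a': (0o644, 3, 0)}
    >>> _drain_batchget(fake())
    {b'a': (420, 3, 0)}
    """
    filedata = {}
    for done, payload in gen:
        if done:
            filedata = payload  # exactly one (True, filedata) pair at the end
        else:
            report(*payload)  # (count, last filename) progress tuple
    return filedata
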
1756 def _prefetchfiles(repo, ctx, actions):
1764 def _prefetchfiles(repo, ctx, actions):
1757 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1765 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1758 of merge actions. ``ctx`` is the context being merged in."""
1766 of merge actions. ``ctx`` is the context being merged in."""
1759
1767
1760 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1768 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1761 # don't touch the context to be merged in. 'cd' is skipped, because
1769 # don't touch the context to be merged in. 'cd' is skipped, because
1762 # changed/deleted never resolves to something from the remote side.
1770 # changed/deleted never resolves to something from the remote side.
1763 oplist = [
1771 oplist = [
1764 actions[a]
1772 actions[a]
1765 for a in (
1773 for a in (
1766 ACTION_GET,
1774 ACTION_GET,
1767 ACTION_DELETED_CHANGED,
1775 ACTION_DELETED_CHANGED,
1768 ACTION_LOCAL_DIR_RENAME_GET,
1776 ACTION_LOCAL_DIR_RENAME_GET,
1769 ACTION_MERGE,
1777 ACTION_MERGE,
1770 )
1778 )
1771 ]
1779 ]
1772 prefetch = scmutil.prefetchfiles
1780 prefetch = scmutil.prefetchfiles
1773 matchfiles = scmutil.matchfiles
1781 matchfiles = scmutil.matchfiles
1774 prefetch(
1782 prefetch(
1775 repo,
1783 repo,
1776 [ctx.rev()],
1784 [ctx.rev()],
1777 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1785 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1778 )
1786 )
1779
1787
1780
1788
1781 @attr.s(frozen=True)
1789 @attr.s(frozen=True)
1782 class updateresult(object):
1790 class updateresult(object):
1783 updatedcount = attr.ib()
1791 updatedcount = attr.ib()
1784 mergedcount = attr.ib()
1792 mergedcount = attr.ib()
1785 removedcount = attr.ib()
1793 removedcount = attr.ib()
1786 unresolvedcount = attr.ib()
1794 unresolvedcount = attr.ib()
1787
1795
1788 def isempty(self):
1796 def isempty(self):
1789 return not (
1797 return not (
1790 self.updatedcount
1798 self.updatedcount
1791 or self.mergedcount
1799 or self.mergedcount
1792 or self.removedcount
1800 or self.removedcount
1793 or self.unresolvedcount
1801 or self.unresolvedcount
1794 )
1802 )
1795
1803
1796
1804
1797 def emptyactions():
1805 def emptyactions():
1798 """create an actions dict, to be populated and passed to applyupdates()"""
1806 """create an actions dict, to be populated and passed to applyupdates()"""
1799 return dict(
1807 return dict(
1800 (m, [])
1808 (m, [])
1801 for m in (
1809 for m in (
1802 ACTION_ADD,
1810 ACTION_ADD,
1803 ACTION_ADD_MODIFIED,
1811 ACTION_ADD_MODIFIED,
1804 ACTION_FORGET,
1812 ACTION_FORGET,
1805 ACTION_GET,
1813 ACTION_GET,
1806 ACTION_CHANGED_DELETED,
1814 ACTION_CHANGED_DELETED,
1807 ACTION_DELETED_CHANGED,
1815 ACTION_DELETED_CHANGED,
1808 ACTION_REMOVE,
1816 ACTION_REMOVE,
1809 ACTION_DIR_RENAME_MOVE_LOCAL,
1817 ACTION_DIR_RENAME_MOVE_LOCAL,
1810 ACTION_LOCAL_DIR_RENAME_GET,
1818 ACTION_LOCAL_DIR_RENAME_GET,
1811 ACTION_MERGE,
1819 ACTION_MERGE,
1812 ACTION_EXEC,
1820 ACTION_EXEC,
1813 ACTION_KEEP,
1821 ACTION_KEEP,
1814 ACTION_PATH_CONFLICT,
1822 ACTION_PATH_CONFLICT,
1815 ACTION_PATH_CONFLICT_RESOLVE,
1823 ACTION_PATH_CONFLICT_RESOLVE,
1816 )
1824 )
1817 )
1825 )
1818
1826
1819
1827
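# Callers of applyupdates() regroup the per-file mapping produced by
# calculateupdates() into the per-action-type lists that emptyactions()
# pre-creates. A sketch of that regrouping step over plain values (the real
# callers use Mercurial's action constants; names here are assumptions):
def _group_by_action(actions_by_file):
    """
    >>> _group_by_action({b'a': ('g', (b'', False), b'msg'),
    ...                   b'b': ('r', None, b'msg')}) == {
    ...     'g': [(b'a', (b'', False), b'msg')],
    ...     'r': [(b'b', None, b'msg')]}
    True
    """
    grouped = {}
    for f, (m, args, msg) in sorted(actions_by_file.items()):
        grouped.setdefault(m, []).append((f, args, msg))
    return grouped
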
1820 def applyupdates(
1828 def applyupdates(
1821 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1829 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1822 ):
1830 ):
1823 """apply the merge action list to the working directory
1831 """apply the merge action list to the working directory
1824
1832
1825 wctx is the working copy context
1833 wctx is the working copy context
1826 mctx is the context to be merged into the working copy
1834 mctx is the context to be merged into the working copy
1827
1835
1828 Return a tuple of (counts, filedata), where counts is a tuple
1836 Return a tuple of (counts, filedata), where counts is a tuple
1829 (updated, merged, removed, unresolved) that describes how many
1837 (updated, merged, removed, unresolved) that describes how many
1830 files were affected by the update, and filedata is as described in
1838 files were affected by the update, and filedata is as described in
1831 batchget.
1839 batchget.
1832 """
1840 """
1833
1841
1834 _prefetchfiles(repo, mctx, actions)
1842 _prefetchfiles(repo, mctx, actions)
1835
1843
1836 updated, merged, removed = 0, 0, 0
1844 updated, merged, removed = 0, 0, 0
1837 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1845 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1838 moves = []
1846 moves = []
1839 for m, l in actions.items():
1847 for m, l in actions.items():
1840 l.sort()
1848 l.sort()
1841
1849
1842 # 'cd' and 'dc' actions are treated like other merge conflicts
1850 # 'cd' and 'dc' actions are treated like other merge conflicts
1843 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1851 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1844 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1852 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1845 mergeactions.extend(actions[ACTION_MERGE])
1853 mergeactions.extend(actions[ACTION_MERGE])
1846 for f, args, msg in mergeactions:
1854 for f, args, msg in mergeactions:
1847 f1, f2, fa, move, anc = args
1855 f1, f2, fa, move, anc = args
1848 if f == b'.hgsubstate': # merged internally
1856 if f == b'.hgsubstate': # merged internally
1849 continue
1857 continue
1850 if f1 is None:
1858 if f1 is None:
1851 fcl = filemerge.absentfilectx(wctx, fa)
1859 fcl = filemerge.absentfilectx(wctx, fa)
1852 else:
1860 else:
1853 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1861 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1854 fcl = wctx[f1]
1862 fcl = wctx[f1]
1855 if f2 is None:
1863 if f2 is None:
1856 fco = filemerge.absentfilectx(mctx, fa)
1864 fco = filemerge.absentfilectx(mctx, fa)
1857 else:
1865 else:
1858 fco = mctx[f2]
1866 fco = mctx[f2]
1859 actx = repo[anc]
1867 actx = repo[anc]
1860 if fa in actx:
1868 if fa in actx:
1861 fca = actx[fa]
1869 fca = actx[fa]
1862 else:
1870 else:
1863 # TODO: move to absentfilectx
1871 # TODO: move to absentfilectx
1864 fca = repo.filectx(f1, fileid=nullrev)
1872 fca = repo.filectx(f1, fileid=nullrev)
1865 ms.add(fcl, fco, fca, f)
1873 ms.add(fcl, fco, fca, f)
1866 if f1 != f and move:
1874 if f1 != f and move:
1867 moves.append(f1)
1875 moves.append(f1)
1868
1876
1869 # remove renamed files after safely stored
1877 # remove renamed files after safely stored
1870 for f in moves:
1878 for f in moves:
1871 if wctx[f].lexists():
1879 if wctx[f].lexists():
1872 repo.ui.debug(b"removing %s\n" % f)
1880 repo.ui.debug(b"removing %s\n" % f)
1873 wctx[f].audit()
1881 wctx[f].audit()
1874 wctx[f].remove()
1882 wctx[f].remove()
1875
1883
1876 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1884 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1877 progress = repo.ui.makeprogress(
1885 progress = repo.ui.makeprogress(
1878 _(b'updating'), unit=_(b'files'), total=numupdates
1886 _(b'updating'), unit=_(b'files'), total=numupdates
1879 )
1887 )
1880
1888
1881 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1889 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1882 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1890 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1883
1891
1884 # record path conflicts
1892 # record path conflicts
1885 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1893 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1886 f1, fo = args
1894 f1, fo = args
1887 s = repo.ui.status
1895 s = repo.ui.status
1888 s(
1896 s(
1889 _(
1897 _(
1890 b"%s: path conflict - a file or link has the same name as a "
1898 b"%s: path conflict - a file or link has the same name as a "
1891 b"directory\n"
1899 b"directory\n"
1892 )
1900 )
1893 % f
1901 % f
1894 )
1902 )
1895 if fo == b'l':
1903 if fo == b'l':
1896 s(_(b"the local file has been renamed to %s\n") % f1)
1904 s(_(b"the local file has been renamed to %s\n") % f1)
1897 else:
1905 else:
1898 s(_(b"the remote file has been renamed to %s\n") % f1)
1906 s(_(b"the remote file has been renamed to %s\n") % f1)
1899 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1907 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1900 ms.addpath(f, f1, fo)
1908 ms.addpath(f, f1, fo)
1901 progress.increment(item=f)
1909 progress.increment(item=f)
1902
1910
1903 # When merging in-memory, we can't support worker processes, so set the
1911 # When merging in-memory, we can't support worker processes, so set the
1904 # per-item cost at 0 in that case.
1912 # per-item cost at 0 in that case.
1905 cost = 0 if wctx.isinmemory() else 0.001
1913 cost = 0 if wctx.isinmemory() else 0.001
1906
1914
1907 # remove in parallel (must come before resolving path conflicts and getting)
1915 # remove in parallel (must come before resolving path conflicts and getting)
1908 prog = worker.worker(
1916 prog = worker.worker(
1909 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1917 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1910 )
1918 )
1911 for i, item in prog:
1919 for i, item in prog:
1912 progress.increment(step=i, item=item)
1920 progress.increment(step=i, item=item)
1913 removed = len(actions[ACTION_REMOVE])
1921 removed = len(actions[ACTION_REMOVE])
1914
1922
1915 # resolve path conflicts (must come before getting)
1923 # resolve path conflicts (must come before getting)
1916 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1924 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1917 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1925 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1918 (f0,) = args
1926 (f0,) = args
1919 if wctx[f0].lexists():
1927 if wctx[f0].lexists():
1920 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1928 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1921 wctx[f].audit()
1929 wctx[f].audit()
1922 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1930 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1923 wctx[f0].remove()
1931 wctx[f0].remove()
1924 progress.increment(item=f)
1932 progress.increment(item=f)
1925
1933
1926 # get in parallel.
1934 # get in parallel.
1927 threadsafe = repo.ui.configbool(
1935 threadsafe = repo.ui.configbool(
1928 b'experimental', b'worker.wdir-get-thread-safe'
1936 b'experimental', b'worker.wdir-get-thread-safe'
1929 )
1937 )
1930 prog = worker.worker(
1938 prog = worker.worker(
1931 repo.ui,
1939 repo.ui,
1932 cost,
1940 cost,
1933 batchget,
1941 batchget,
1934 (repo, mctx, wctx, wantfiledata),
1942 (repo, mctx, wctx, wantfiledata),
1935 actions[ACTION_GET],
1943 actions[ACTION_GET],
1936 threadsafe=threadsafe,
1944 threadsafe=threadsafe,
1937 hasretval=True,
1945 hasretval=True,
1938 )
1946 )
1939 getfiledata = {}
1947 getfiledata = {}
1940 for final, res in prog:
1948 for final, res in prog:
1941 if final:
1949 if final:
1942 getfiledata = res
1950 getfiledata = res
1943 else:
1951 else:
1944 i, item = res
1952 i, item = res
1945 progress.increment(step=i, item=item)
1953 progress.increment(step=i, item=item)
1946 updated = len(actions[ACTION_GET])
1954 updated = len(actions[ACTION_GET])
1947
1955
1948 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1956 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1949 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1957 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1950
1958
1951 # forget (manifest only, just log it) (must come first)
1959 # forget (manifest only, just log it) (must come first)
1952 for f, args, msg in actions[ACTION_FORGET]:
1960 for f, args, msg in actions[ACTION_FORGET]:
1953 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1961 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1954 progress.increment(item=f)
1962 progress.increment(item=f)
1955
1963
1956 # re-add (manifest only, just log it)
1964 # re-add (manifest only, just log it)
1957 for f, args, msg in actions[ACTION_ADD]:
1965 for f, args, msg in actions[ACTION_ADD]:
1958 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1966 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1959 progress.increment(item=f)
1967 progress.increment(item=f)
1960
1968
1961 # re-add/mark as modified (manifest only, just log it)
1969 # re-add/mark as modified (manifest only, just log it)
1962 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1970 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1963 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1971 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1964 progress.increment(item=f)
1972 progress.increment(item=f)
1965
1973
1966 # keep (noop, just log it)
1974 # keep (noop, just log it)
1967 for f, args, msg in actions[ACTION_KEEP]:
1975 for f, args, msg in actions[ACTION_KEEP]:
1968 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1976 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1969 # no progress
1977 # no progress
1970
1978
1971 # directory rename, move local
1979 # directory rename, move local
1972 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1980 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1973 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1981 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1974 progress.increment(item=f)
1982 progress.increment(item=f)
1975 f0, flags = args
1983 f0, flags = args
1976 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1984 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1977 wctx[f].audit()
1985 wctx[f].audit()
1978 wctx[f].write(wctx.filectx(f0).data(), flags)
1986 wctx[f].write(wctx.filectx(f0).data(), flags)
1979 wctx[f0].remove()
1987 wctx[f0].remove()
1980 updated += 1
1988 updated += 1
1981
1989
1982 # local directory rename, get
1990 # local directory rename, get
1983 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1991 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1984 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1992 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1985 progress.increment(item=f)
1993 progress.increment(item=f)
1986 f0, flags = args
1994 f0, flags = args
1987 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1995 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1988 wctx[f].write(mctx.filectx(f0).data(), flags)
1996 wctx[f].write(mctx.filectx(f0).data(), flags)
1989 updated += 1
1997 updated += 1
1990
1998
1991 # exec
1999 # exec
1992 for f, args, msg in actions[ACTION_EXEC]:
2000 for f, args, msg in actions[ACTION_EXEC]:
1993 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
2001 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1994 progress.increment(item=f)
2002 progress.increment(item=f)
1995 (flags,) = args
2003 (flags,) = args
1996 wctx[f].audit()
2004 wctx[f].audit()
1997 wctx[f].setflags(b'l' in flags, b'x' in flags)
2005 wctx[f].setflags(b'l' in flags, b'x' in flags)
1998 updated += 1
2006 updated += 1
1999
2007
2000 # the ordering is important here -- ms.mergedriver will raise if the merge
2008 # the ordering is important here -- ms.mergedriver will raise if the merge
2001 # driver has changed, and we want to be able to bypass it when overwrite is
2009 # driver has changed, and we want to be able to bypass it when overwrite is
2002 # True
2010 # True
2003 usemergedriver = not overwrite and mergeactions and ms.mergedriver
2011 usemergedriver = not overwrite and mergeactions and ms.mergedriver
2004
2012
2005 if usemergedriver:
2013 if usemergedriver:
2006 if wctx.isinmemory():
2014 if wctx.isinmemory():
2007 raise error.InMemoryMergeConflictsError(
2015 raise error.InMemoryMergeConflictsError(
2008 b"in-memory merge does not support mergedriver"
2016 b"in-memory merge does not support mergedriver"
2009 )
2017 )
2010 ms.commit()
2018 ms.commit()
2011 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2019 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2012 # the driver might leave some files unresolved
2020 # the driver might leave some files unresolved
2013 unresolvedf = set(ms.unresolved())
2021 unresolvedf = set(ms.unresolved())
2014 if not proceed:
2022 if not proceed:
2015 # XXX setting unresolved to at least 1 is a hack to make sure we
2023 # XXX setting unresolved to at least 1 is a hack to make sure we
2016 # error out
2024 # error out
2017 return updateresult(
2025 return updateresult(
2018 updated, merged, removed, max(len(unresolvedf), 1)
2026 updated, merged, removed, max(len(unresolvedf), 1)
2019 )
2027 )
2020 newactions = []
2028 newactions = []
2021 for f, args, msg in mergeactions:
2029 for f, args, msg in mergeactions:
2022 if f in unresolvedf:
2030 if f in unresolvedf:
2023 newactions.append((f, args, msg))
2031 newactions.append((f, args, msg))
2024 mergeactions = newactions
2032 mergeactions = newactions
2025
2033
2026 try:
2034 try:
2027 # premerge
2035 # premerge
2028 tocomplete = []
2036 tocomplete = []
2029 for f, args, msg in mergeactions:
2037 for f, args, msg in mergeactions:
2030 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2038 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2031 progress.increment(item=f)
2039 progress.increment(item=f)
2032 if f == b'.hgsubstate': # subrepo states need updating
2040 if f == b'.hgsubstate': # subrepo states need updating
2033 subrepoutil.submerge(
2041 subrepoutil.submerge(
2034 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2042 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2035 )
2043 )
2036 continue
2044 continue
2037 wctx[f].audit()
2045 wctx[f].audit()
2038 complete, r = ms.preresolve(f, wctx)
2046 complete, r = ms.preresolve(f, wctx)
2039 if not complete:
2047 if not complete:
2040 numupdates += 1
2048 numupdates += 1
2041 tocomplete.append((f, args, msg))
2049 tocomplete.append((f, args, msg))
2042
2050
2043 # merge
2051 # merge
2044 for f, args, msg in tocomplete:
2052 for f, args, msg in tocomplete:
2045 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2053 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2046 progress.increment(item=f, total=numupdates)
2054 progress.increment(item=f, total=numupdates)
2047 ms.resolve(f, wctx)
2055 ms.resolve(f, wctx)
2048
2056
2049 finally:
2057 finally:
2050 ms.commit()
2058 ms.commit()
2051
2059
2052 unresolved = ms.unresolvedcount()
2060 unresolved = ms.unresolvedcount()
2053
2061
2054 if (
2062 if (
2055 usemergedriver
2063 usemergedriver
2056 and not unresolved
2064 and not unresolved
2057 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2065 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2058 ):
2066 ):
2059 if not driverconclude(repo, ms, wctx, labels=labels):
2067 if not driverconclude(repo, ms, wctx, labels=labels):
2060 # XXX setting unresolved to at least 1 is a hack to make sure we
2068 # XXX setting unresolved to at least 1 is a hack to make sure we
2061 # error out
2069 # error out
2062 unresolved = max(unresolved, 1)
2070 unresolved = max(unresolved, 1)
2063
2071
2064 ms.commit()
2072 ms.commit()
2065
2073
2066 msupdated, msmerged, msremoved = ms.counts()
2074 msupdated, msmerged, msremoved = ms.counts()
2067 updated += msupdated
2075 updated += msupdated
2068 merged += msmerged
2076 merged += msmerged
2069 removed += msremoved
2077 removed += msremoved
2070
2078
2071 extraactions = ms.actions()
2079 extraactions = ms.actions()
2072 if extraactions:
2080 if extraactions:
2073 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2081 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2074 for k, acts in pycompat.iteritems(extraactions):
2082 for k, acts in pycompat.iteritems(extraactions):
2075 actions[k].extend(acts)
2083 actions[k].extend(acts)
2076 if k == ACTION_GET and wantfiledata:
2084 if k == ACTION_GET and wantfiledata:
2077 # no filedata until mergestate is updated to provide it
2085 # no filedata until mergestate is updated to provide it
2078 for a in acts:
2086 for a in acts:
2079 getfiledata[a[0]] = None
2087 getfiledata[a[0]] = None
2080 # Remove these files from actions[ACTION_MERGE] as well. This is
2088 # Remove these files from actions[ACTION_MERGE] as well. This is
2081 # important because in recordupdates, files in actions[ACTION_MERGE]
2089 # important because in recordupdates, files in actions[ACTION_MERGE]
2082 # are processed after files in other actions, and the merge driver
2090 # are processed after files in other actions, and the merge driver
2083 # might add files to those actions via extraactions above. This can
2091 # might add files to those actions via extraactions above. This can
2084 # lead to a file being recorded twice, with poor results. This is
2092 # lead to a file being recorded twice, with poor results. This is
2085 # especially problematic for actions[ACTION_REMOVE] (currently only
2093 # especially problematic for actions[ACTION_REMOVE] (currently only
2086 # possible with the merge driver in the initial merge process;
2094 # possible with the merge driver in the initial merge process;
2087 # interrupted merges don't go through this flow).
2095 # interrupted merges don't go through this flow).
2088 #
2096 #
2089 # The real fix here is to have indexes by both file and action so
2097 # The real fix here is to have indexes by both file and action so
2090 # that when the action for a file is changed it is automatically
2098 # that when the action for a file is changed it is automatically
2091 # reflected in the other action lists. But that involves a more
2099 # reflected in the other action lists. But that involves a more
2092 # complex data structure, so this will do for now.
2100 # complex data structure, so this will do for now.
2093 #
2101 #
2094 # We don't need to do the same operation for 'dc' and 'cd' because
2102 # We don't need to do the same operation for 'dc' and 'cd' because
2095 # those lists aren't consulted again.
2103 # those lists aren't consulted again.
2096 mfiles.difference_update(a[0] for a in acts)
2104 mfiles.difference_update(a[0] for a in acts)
2097
2105
2098 actions[ACTION_MERGE] = [
2106 actions[ACTION_MERGE] = [
2099 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2107 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2100 ]
2108 ]
2101
2109
2102 progress.complete()
2110 progress.complete()
2103 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2111 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2104 return updateresult(updated, merged, removed, unresolved), getfiledata
2112 return updateresult(updated, merged, removed, unresolved), getfiledata
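# Illustrative sketch (assumed usage, not from merge.py itself): applyupdates()
# is normally driven from update() below.  `repo`, `actions`, `wctx` and `mctx`
# are assumed to already exist in the caller.
#
#     stats, getfiledata = applyupdates(
#         repo, actions, wctx, mctx, overwrite=False, wantfiledata=True
#     )
#     if stats.unresolvedcount:
#         ...  # conflicts remain; the user resolves them with 'hg resolve'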
2105
2113
2106
2114
2107 def recordupdates(repo, actions, branchmerge, getfiledata):
2115 def recordupdates(repo, actions, branchmerge, getfiledata):
2108 """record merge actions to the dirstate"""
2116 """record merge actions to the dirstate"""
2109 # remove (must come first)
2117 # remove (must come first)
2110 for f, args, msg in actions.get(ACTION_REMOVE, []):
2118 for f, args, msg in actions.get(ACTION_REMOVE, []):
2111 if branchmerge:
2119 if branchmerge:
2112 repo.dirstate.remove(f)
2120 repo.dirstate.remove(f)
2113 else:
2121 else:
2114 repo.dirstate.drop(f)
2122 repo.dirstate.drop(f)
2115
2123
2116 # forget (must come first)
2124 # forget (must come first)
2117 for f, args, msg in actions.get(ACTION_FORGET, []):
2125 for f, args, msg in actions.get(ACTION_FORGET, []):
2118 repo.dirstate.drop(f)
2126 repo.dirstate.drop(f)
2119
2127
2120 # resolve path conflicts
2128 # resolve path conflicts
2121 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
2129 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
2122 (f0,) = args
2130 (f0,) = args
2123 origf0 = repo.dirstate.copied(f0) or f0
2131 origf0 = repo.dirstate.copied(f0) or f0
2124 repo.dirstate.add(f)
2132 repo.dirstate.add(f)
2125 repo.dirstate.copy(origf0, f)
2133 repo.dirstate.copy(origf0, f)
2126 if f0 == origf0:
2134 if f0 == origf0:
2127 repo.dirstate.remove(f0)
2135 repo.dirstate.remove(f0)
2128 else:
2136 else:
2129 repo.dirstate.drop(f0)
2137 repo.dirstate.drop(f0)
2130
2138
2131 # re-add
2139 # re-add
2132 for f, args, msg in actions.get(ACTION_ADD, []):
2140 for f, args, msg in actions.get(ACTION_ADD, []):
2133 repo.dirstate.add(f)
2141 repo.dirstate.add(f)
2134
2142
2135 # re-add/mark as modified
2143 # re-add/mark as modified
2136 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
2144 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
2137 if branchmerge:
2145 if branchmerge:
2138 repo.dirstate.normallookup(f)
2146 repo.dirstate.normallookup(f)
2139 else:
2147 else:
2140 repo.dirstate.add(f)
2148 repo.dirstate.add(f)
2141
2149
2142 # exec change
2150 # exec change
2143 for f, args, msg in actions.get(ACTION_EXEC, []):
2151 for f, args, msg in actions.get(ACTION_EXEC, []):
2144 repo.dirstate.normallookup(f)
2152 repo.dirstate.normallookup(f)
2145
2153
2146 # keep
2154 # keep
2147 for f, args, msg in actions.get(ACTION_KEEP, []):
2155 for f, args, msg in actions.get(ACTION_KEEP, []):
2148 pass
2156 pass
2149
2157
2150 # get
2158 # get
2151 for f, args, msg in actions.get(ACTION_GET, []):
2159 for f, args, msg in actions.get(ACTION_GET, []):
2152 if branchmerge:
2160 if branchmerge:
2153 repo.dirstate.otherparent(f)
2161 repo.dirstate.otherparent(f)
2154 else:
2162 else:
2155 parentfiledata = getfiledata[f] if getfiledata else None
2163 parentfiledata = getfiledata[f] if getfiledata else None
2156 repo.dirstate.normal(f, parentfiledata=parentfiledata)
2164 repo.dirstate.normal(f, parentfiledata=parentfiledata)
2157
2165
2158 # merge
2166 # merge
2159 for f, args, msg in actions.get(ACTION_MERGE, []):
2167 for f, args, msg in actions.get(ACTION_MERGE, []):
2160 f1, f2, fa, move, anc = args
2168 f1, f2, fa, move, anc = args
2161 if branchmerge:
2169 if branchmerge:
2162 # We've done a branch merge, mark this file as merged
2170 # We've done a branch merge, mark this file as merged
2163 # so that we properly record the merger later
2171 # so that we properly record the merger later
2164 repo.dirstate.merge(f)
2172 repo.dirstate.merge(f)
2165 if f1 != f2: # copy/rename
2173 if f1 != f2: # copy/rename
2166 if move:
2174 if move:
2167 repo.dirstate.remove(f1)
2175 repo.dirstate.remove(f1)
2168 if f1 != f:
2176 if f1 != f:
2169 repo.dirstate.copy(f1, f)
2177 repo.dirstate.copy(f1, f)
2170 else:
2178 else:
2171 repo.dirstate.copy(f2, f)
2179 repo.dirstate.copy(f2, f)
2172 else:
2180 else:
2173 # We've update-merged a locally modified file, so
2181 # We've update-merged a locally modified file, so
2174 # we set the dirstate to emulate a normal checkout
2182 # we set the dirstate to emulate a normal checkout
2175 # of that file some time in the past. Thus our
2183 # of that file some time in the past. Thus our
2176 # merge will appear as a normal local file
2184 # merge will appear as a normal local file
2177 # modification.
2185 # modification.
2178 if f2 == f: # file not locally copied/moved
2186 if f2 == f: # file not locally copied/moved
2179 repo.dirstate.normallookup(f)
2187 repo.dirstate.normallookup(f)
2180 if move:
2188 if move:
2181 repo.dirstate.drop(f1)
2189 repo.dirstate.drop(f1)
2182
2190
2183 # directory rename, move local
2191 # directory rename, move local
2184 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
2192 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
2185 f0, flag = args
2193 f0, flag = args
2186 if branchmerge:
2194 if branchmerge:
2187 repo.dirstate.add(f)
2195 repo.dirstate.add(f)
2188 repo.dirstate.remove(f0)
2196 repo.dirstate.remove(f0)
2189 repo.dirstate.copy(f0, f)
2197 repo.dirstate.copy(f0, f)
2190 else:
2198 else:
2191 repo.dirstate.normal(f)
2199 repo.dirstate.normal(f)
2192 repo.dirstate.drop(f0)
2200 repo.dirstate.drop(f0)
2193
2201
2194 # directory rename, get
2202 # directory rename, get
2195 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
2203 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
2196 f0, flag = args
2204 f0, flag = args
2197 if branchmerge:
2205 if branchmerge:
2198 repo.dirstate.add(f)
2206 repo.dirstate.add(f)
2199 repo.dirstate.copy(f0, f)
2207 repo.dirstate.copy(f0, f)
2200 else:
2208 else:
2201 repo.dirstate.normal(f)
2209 repo.dirstate.normal(f)
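# Illustrative sketch (assumed usage, not from merge.py itself): recordupdates()
# is expected to run under the wlock and inside a dirstate parentchange() block,
# as update() does below.  `repo`, `actions`, `fp1`, `fp2` and `getfiledata` are
# assumed to come from the caller.
#
#     with repo.wlock(), repo.dirstate.parentchange():
#         repo.setparents(fp1, fp2)
#         recordupdates(repo, actions, branchmerge=False, getfiledata=getfiledata)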
2202
2210
2203
2211
2204 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2212 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2205 UPDATECHECK_NONE = b'none'
2213 UPDATECHECK_NONE = b'none'
2206 UPDATECHECK_LINEAR = b'linear'
2214 UPDATECHECK_LINEAR = b'linear'
2207 UPDATECHECK_NO_CONFLICT = b'noconflict'
2215 UPDATECHECK_NO_CONFLICT = b'noconflict'
2208
2216
2209
2217
2210 def update(
2218 def update(
2211 repo,
2219 repo,
2212 node,
2220 node,
2213 branchmerge,
2221 branchmerge,
2214 force,
2222 force,
2215 ancestor=None,
2223 ancestor=None,
2216 mergeancestor=False,
2224 mergeancestor=False,
2217 labels=None,
2225 labels=None,
2218 matcher=None,
2226 matcher=None,
2219 mergeforce=False,
2227 mergeforce=False,
2220 updatecheck=None,
2228 updatecheck=None,
2221 wc=None,
2229 wc=None,
2222 ):
2230 ):
2223 """
2231 """
2224 Perform a merge between the working directory and the given node
2232 Perform a merge between the working directory and the given node
2225
2233
2226 node = the node to update to
2234 node = the node to update to
2227 branchmerge = whether to merge between branches
2235 branchmerge = whether to merge between branches
2228 force = whether to force branch merging or file overwriting
2236 force = whether to force branch merging or file overwriting
2229 matcher = a matcher to filter file lists (dirstate not updated)
2237 matcher = a matcher to filter file lists (dirstate not updated)
2230 mergeancestor = whether it is merging with an ancestor. If true,
2238 mergeancestor = whether it is merging with an ancestor. If true,
2231 we should accept the incoming changes for any prompts that occur.
2239 we should accept the incoming changes for any prompts that occur.
2232 If false, merging with an ancestor (fast-forward) is only allowed
2240 If false, merging with an ancestor (fast-forward) is only allowed
2233 between different named branches. This flag is used by the rebase extension
2241 between different named branches. This flag is used by the rebase extension
2234 as a temporary fix and should be avoided in general.
2242 as a temporary fix and should be avoided in general.
2235 labels = labels to use for base, local and other
2243 labels = labels to use for base, local and other
2236 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2244 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2237 this is True, then 'force' should be True as well.
2245 this is True, then 'force' should be True as well.
2238
2246
2239 The table below shows all the behaviors of the update command given the
2247 The table below shows all the behaviors of the update command given the
2240 -c/--check and -C/--clean or no options, whether the working directory is
2248 -c/--check and -C/--clean or no options, whether the working directory is
2241 dirty, whether a revision is specified, and the relationship of the parent
2249 dirty, whether a revision is specified, and the relationship of the parent
2242 rev to the target rev (linear or not). Match from top first. The -n
2250 rev to the target rev (linear or not). Match from top first. The -n
2243 option doesn't exist on the command line, but represents the
2251 option doesn't exist on the command line, but represents the
2244 experimental.updatecheck=noconflict option.
2252 experimental.updatecheck=noconflict option.
2245
2253
2246 This logic is tested by test-update-branches.t.
2254 This logic is tested by test-update-branches.t.
2247
2255
2248 -c -C -n -m dirty rev linear | result
2256 -c -C -n -m dirty rev linear | result
2249 y y * * * * * | (1)
2257 y y * * * * * | (1)
2250 y * y * * * * | (1)
2258 y * y * * * * | (1)
2251 y * * y * * * | (1)
2259 y * * y * * * | (1)
2252 * y y * * * * | (1)
2260 * y y * * * * | (1)
2253 * y * y * * * | (1)
2261 * y * y * * * | (1)
2254 * * y y * * * | (1)
2262 * * y y * * * | (1)
2255 * * * * * n n | x
2263 * * * * * n n | x
2256 * * * * n * * | ok
2264 * * * * n * * | ok
2257 n n n n y * y | merge
2265 n n n n y * y | merge
2258 n n n n y y n | (2)
2266 n n n n y y n | (2)
2259 n n n y y * * | merge
2267 n n n y y * * | merge
2260 n n y n y * * | merge if no conflict
2268 n n y n y * * | merge if no conflict
2261 n y n n y * * | discard
2269 n y n n y * * | discard
2262 y n n n y * * | (3)
2270 y n n n y * * | (3)
2263
2271
2264 x = can't happen
2272 x = can't happen
2265 * = don't-care
2273 * = don't-care
2266 1 = incompatible options (checked in commands.py)
2274 1 = incompatible options (checked in commands.py)
2267 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2275 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2268 3 = abort: uncommitted changes (checked in commands.py)
2276 3 = abort: uncommitted changes (checked in commands.py)
2269
2277
2270 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2278 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2271 to repo[None] if None is passed.
2279 to repo[None] if None is passed.
2272
2280
2273 Return the same tuple as applyupdates().
2281 Return the same tuple as applyupdates().
2274 """
2282 """
2275 # Avoid cycle.
2283 # Avoid cycle.
2276 from . import sparse
2284 from . import sparse
2277
2285
2278 # This function used to find the default destination if node was None, but
2286 # This function used to find the default destination if node was None, but
2279 # that's now in destutil.py.
2287 # that's now in destutil.py.
2280 assert node is not None
2288 assert node is not None
2281 if not branchmerge and not force:
2289 if not branchmerge and not force:
2282 # TODO: remove the default once all callers that pass branchmerge=False
2290 # TODO: remove the default once all callers that pass branchmerge=False
2283 # and force=False pass a value for updatecheck. We may want to allow
2291 # and force=False pass a value for updatecheck. We may want to allow
2284 # updatecheck='abort' to better support some of these callers.
2292 # updatecheck='abort' to better support some of these callers.
2285 if updatecheck is None:
2293 if updatecheck is None:
2286 updatecheck = UPDATECHECK_LINEAR
2294 updatecheck = UPDATECHECK_LINEAR
2287 if updatecheck not in (
2295 if updatecheck not in (
2288 UPDATECHECK_NONE,
2296 UPDATECHECK_NONE,
2289 UPDATECHECK_LINEAR,
2297 UPDATECHECK_LINEAR,
2290 UPDATECHECK_NO_CONFLICT,
2298 UPDATECHECK_NO_CONFLICT,
2291 ):
2299 ):
2292 raise ValueError(
2300 raise ValueError(
2293 r'Invalid updatecheck %r (can accept %r)'
2301 r'Invalid updatecheck %r (can accept %r)'
2294 % (
2302 % (
2295 updatecheck,
2303 updatecheck,
2296 (
2304 (
2297 UPDATECHECK_NONE,
2305 UPDATECHECK_NONE,
2298 UPDATECHECK_LINEAR,
2306 UPDATECHECK_LINEAR,
2299 UPDATECHECK_NO_CONFLICT,
2307 UPDATECHECK_NO_CONFLICT,
2300 ),
2308 ),
2301 )
2309 )
2302 )
2310 )
2303 with repo.wlock():
2311 with repo.wlock():
2304 if wc is None:
2312 if wc is None:
2305 wc = repo[None]
2313 wc = repo[None]
2306 pl = wc.parents()
2314 pl = wc.parents()
2307 p1 = pl[0]
2315 p1 = pl[0]
2308 p2 = repo[node]
2316 p2 = repo[node]
2309 if ancestor is not None:
2317 if ancestor is not None:
2310 pas = [repo[ancestor]]
2318 pas = [repo[ancestor]]
2311 else:
2319 else:
2312 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2320 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2313 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2321 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2314 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2322 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2315 else:
2323 else:
2316 pas = [p1.ancestor(p2, warn=branchmerge)]
2324 pas = [p1.ancestor(p2, warn=branchmerge)]
2317
2325
2318 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2326 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2319
2327
2320 overwrite = force and not branchmerge
2328 overwrite = force and not branchmerge
2321 ### check phase
2329 ### check phase
2322 if not overwrite:
2330 if not overwrite:
2323 if len(pl) > 1:
2331 if len(pl) > 1:
2324 raise error.Abort(_(b"outstanding uncommitted merge"))
2332 raise error.Abort(_(b"outstanding uncommitted merge"))
2325 ms = mergestate.read(repo)
2333 ms = mergestate.read(repo)
2326 if list(ms.unresolved()):
2334 if list(ms.unresolved()):
2327 raise error.Abort(
2335 raise error.Abort(
2328 _(b"outstanding merge conflicts"),
2336 _(b"outstanding merge conflicts"),
2329 hint=_(b"use 'hg resolve' to resolve"),
2337 hint=_(b"use 'hg resolve' to resolve"),
2330 )
2338 )
2331 if branchmerge:
2339 if branchmerge:
2332 if pas == [p2]:
2340 if pas == [p2]:
2333 raise error.Abort(
2341 raise error.Abort(
2334 _(
2342 _(
2335 b"merging with a working directory ancestor"
2343 b"merging with a working directory ancestor"
2336 b" has no effect"
2344 b" has no effect"
2337 )
2345 )
2338 )
2346 )
2339 elif pas == [p1]:
2347 elif pas == [p1]:
2340 if not mergeancestor and wc.branch() == p2.branch():
2348 if not mergeancestor and wc.branch() == p2.branch():
2341 raise error.Abort(
2349 raise error.Abort(
2342 _(b"nothing to merge"),
2350 _(b"nothing to merge"),
2343 hint=_(b"use 'hg update' or check 'hg heads'"),
2351 hint=_(b"use 'hg update' or check 'hg heads'"),
2344 )
2352 )
2345 if not force and (wc.files() or wc.deleted()):
2353 if not force and (wc.files() or wc.deleted()):
2346 raise error.Abort(
2354 raise error.Abort(
2347 _(b"uncommitted changes"),
2355 _(b"uncommitted changes"),
2348 hint=_(b"use 'hg status' to list changes"),
2356 hint=_(b"use 'hg status' to list changes"),
2349 )
2357 )
2350 if not wc.isinmemory():
2358 if not wc.isinmemory():
2351 for s in sorted(wc.substate):
2359 for s in sorted(wc.substate):
2352 wc.sub(s).bailifchanged()
2360 wc.sub(s).bailifchanged()
2353
2361
2354 elif not overwrite:
2362 elif not overwrite:
2355 if p1 == p2: # no-op update
2363 if p1 == p2: # no-op update
2356 # call the hooks and exit early
2364 # call the hooks and exit early
2357 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2365 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2358 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2366 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2359 return updateresult(0, 0, 0, 0)
2367 return updateresult(0, 0, 0, 0)
2360
2368
2361 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2369 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2362 [p1],
2370 [p1],
2363 [p2],
2371 [p2],
2364 ): # nonlinear
2372 ): # nonlinear
2365 dirty = wc.dirty(missing=True)
2373 dirty = wc.dirty(missing=True)
2366 if dirty:
2374 if dirty:
2367 # Branching is a bit strange to ensure we do the minimal
2375 # Branching is a bit strange to ensure we do the minimal
2368 # number of calls to obsutil.foreground.
2376 # number of calls to obsutil.foreground.
2369 foreground = obsutil.foreground(repo, [p1.node()])
2377 foreground = obsutil.foreground(repo, [p1.node()])
2370 # note: the <node> variable contains a random identifier
2378 # note: the <node> variable contains a random identifier
2371 if repo[node].node() in foreground:
2379 if repo[node].node() in foreground:
2372 pass # allow updating to successors
2380 pass # allow updating to successors
2373 else:
2381 else:
2374 msg = _(b"uncommitted changes")
2382 msg = _(b"uncommitted changes")
2375 hint = _(b"commit or update --clean to discard changes")
2383 hint = _(b"commit or update --clean to discard changes")
2376 raise error.UpdateAbort(msg, hint=hint)
2384 raise error.UpdateAbort(msg, hint=hint)
2377 else:
2385 else:
2378 # Allow jumping branches if clean and specific rev given
2386 # Allow jumping branches if clean and specific rev given
2379 pass
2387 pass
2380
2388
2381 if overwrite:
2389 if overwrite:
2382 pas = [wc]
2390 pas = [wc]
2383 elif not branchmerge:
2391 elif not branchmerge:
2384 pas = [p1]
2392 pas = [p1]
2385
2393
2386 # deprecated config: merge.followcopies
2394 # deprecated config: merge.followcopies
2387 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2395 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2388 if overwrite:
2396 if overwrite:
2389 followcopies = False
2397 followcopies = False
2390 elif not pas[0]:
2398 elif not pas[0]:
2391 followcopies = False
2399 followcopies = False
2392 if not branchmerge and not wc.dirty(missing=True):
2400 if not branchmerge and not wc.dirty(missing=True):
2393 followcopies = False
2401 followcopies = False
2394
2402
2395 ### calculate phase
2403 ### calculate phase
2396 actionbyfile, diverge, renamedelete = calculateupdates(
2404 actionbyfile, diverge, renamedelete = calculateupdates(
2397 repo,
2405 repo,
2398 wc,
2406 wc,
2399 p2,
2407 p2,
2400 pas,
2408 pas,
2401 branchmerge,
2409 branchmerge,
2402 force,
2410 force,
2403 mergeancestor,
2411 mergeancestor,
2404 followcopies,
2412 followcopies,
2405 matcher=matcher,
2413 matcher=matcher,
2406 mergeforce=mergeforce,
2414 mergeforce=mergeforce,
2407 )
2415 )
2408
2416
2409 if updatecheck == UPDATECHECK_NO_CONFLICT:
2417 if updatecheck == UPDATECHECK_NO_CONFLICT:
2410 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2418 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2411 if m not in (
2419 if m not in (
2412 ACTION_GET,
2420 ACTION_GET,
2413 ACTION_KEEP,
2421 ACTION_KEEP,
2414 ACTION_EXEC,
2422 ACTION_EXEC,
2415 ACTION_REMOVE,
2423 ACTION_REMOVE,
2416 ACTION_PATH_CONFLICT_RESOLVE,
2424 ACTION_PATH_CONFLICT_RESOLVE,
2417 ):
2425 ):
2418 msg = _(b"conflicting changes")
2426 msg = _(b"conflicting changes")
2419 hint = _(b"commit or update --clean to discard changes")
2427 hint = _(b"commit or update --clean to discard changes")
2420 raise error.Abort(msg, hint=hint)
2428 raise error.Abort(msg, hint=hint)
2421
2429
2422 # Prompt and create actions. Most of this is in the resolve phase
2430 # Prompt and create actions. Most of this is in the resolve phase
2423 # already, but we can't handle .hgsubstate in filemerge or
2431 # already, but we can't handle .hgsubstate in filemerge or
2424 # subrepoutil.submerge yet so we have to keep prompting for it.
2432 # subrepoutil.submerge yet so we have to keep prompting for it.
2425 if b'.hgsubstate' in actionbyfile:
2433 if b'.hgsubstate' in actionbyfile:
2426 f = b'.hgsubstate'
2434 f = b'.hgsubstate'
2427 m, args, msg = actionbyfile[f]
2435 m, args, msg = actionbyfile[f]
2428 prompts = filemerge.partextras(labels)
2436 prompts = filemerge.partextras(labels)
2429 prompts[b'f'] = f
2437 prompts[b'f'] = f
2430 if m == ACTION_CHANGED_DELETED:
2438 if m == ACTION_CHANGED_DELETED:
2431 if repo.ui.promptchoice(
2439 if repo.ui.promptchoice(
2432 _(
2440 _(
2433 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2441 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2434 b"use (c)hanged version or (d)elete?"
2442 b"use (c)hanged version or (d)elete?"
2435 b"$$ &Changed $$ &Delete"
2443 b"$$ &Changed $$ &Delete"
2436 )
2444 )
2437 % prompts,
2445 % prompts,
2438 0,
2446 0,
2439 ):
2447 ):
2440 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2448 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2441 elif f in p1:
2449 elif f in p1:
2442 actionbyfile[f] = (
2450 actionbyfile[f] = (
2443 ACTION_ADD_MODIFIED,
2451 ACTION_ADD_MODIFIED,
2444 None,
2452 None,
2445 b'prompt keep',
2453 b'prompt keep',
2446 )
2454 )
2447 else:
2455 else:
2448 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2456 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2449 elif m == ACTION_DELETED_CHANGED:
2457 elif m == ACTION_DELETED_CHANGED:
2450 f1, f2, fa, move, anc = args
2458 f1, f2, fa, move, anc = args
2451 flags = p2[f2].flags()
2459 flags = p2[f2].flags()
2452 if (
2460 if (
2453 repo.ui.promptchoice(
2461 repo.ui.promptchoice(
2454 _(
2462 _(
2455 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2463 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2456 b"use (c)hanged version or leave (d)eleted?"
2464 b"use (c)hanged version or leave (d)eleted?"
2457 b"$$ &Changed $$ &Deleted"
2465 b"$$ &Changed $$ &Deleted"
2458 )
2466 )
2459 % prompts,
2467 % prompts,
2460 0,
2468 0,
2461 )
2469 )
2462 == 0
2470 == 0
2463 ):
2471 ):
2464 actionbyfile[f] = (
2472 actionbyfile[f] = (
2465 ACTION_GET,
2473 ACTION_GET,
2466 (flags, False),
2474 (flags, False),
2467 b'prompt recreating',
2475 b'prompt recreating',
2468 )
2476 )
2469 else:
2477 else:
2470 del actionbyfile[f]
2478 del actionbyfile[f]
2471
2479
2472 # Convert to dictionary-of-lists format
2480 # Convert to dictionary-of-lists format
2473 actions = emptyactions()
2481 actions = emptyactions()
2474 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2482 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2475 if m not in actions:
2483 if m not in actions:
2476 actions[m] = []
2484 actions[m] = []
2477 actions[m].append((f, args, msg))
2485 actions[m].append((f, args, msg))
2478
2486
2479 if not util.fscasesensitive(repo.path):
2487 if not util.fscasesensitive(repo.path):
2480 # check collision between files only in p2 for clean update
2488 # check collision between files only in p2 for clean update
2481 if not branchmerge and (
2489 if not branchmerge and (
2482 force or not wc.dirty(missing=True, branch=False)
2490 force or not wc.dirty(missing=True, branch=False)
2483 ):
2491 ):
2484 _checkcollision(repo, p2.manifest(), None)
2492 _checkcollision(repo, p2.manifest(), None)
2485 else:
2493 else:
2486 _checkcollision(repo, wc.manifest(), actions)
2494 _checkcollision(repo, wc.manifest(), actions)
2487
2495
2488 # divergent renames
2496 # divergent renames
2489 for f, fl in sorted(pycompat.iteritems(diverge)):
2497 for f, fl in sorted(pycompat.iteritems(diverge)):
2490 repo.ui.warn(
2498 repo.ui.warn(
2491 _(
2499 _(
2492 b"note: possible conflict - %s was renamed "
2500 b"note: possible conflict - %s was renamed "
2493 b"multiple times to:\n"
2501 b"multiple times to:\n"
2494 )
2502 )
2495 % f
2503 % f
2496 )
2504 )
2497 for nf in sorted(fl):
2505 for nf in sorted(fl):
2498 repo.ui.warn(b" %s\n" % nf)
2506 repo.ui.warn(b" %s\n" % nf)
2499
2507
2500 # rename and delete
2508 # rename and delete
2501 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2509 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2502 repo.ui.warn(
2510 repo.ui.warn(
2503 _(
2511 _(
2504 b"note: possible conflict - %s was deleted "
2512 b"note: possible conflict - %s was deleted "
2505 b"and renamed to:\n"
2513 b"and renamed to:\n"
2506 )
2514 )
2507 % f
2515 % f
2508 )
2516 )
2509 for nf in sorted(fl):
2517 for nf in sorted(fl):
2510 repo.ui.warn(b" %s\n" % nf)
2518 repo.ui.warn(b" %s\n" % nf)
2511
2519
2512 ### apply phase
2520 ### apply phase
2513 if not branchmerge: # just jump to the new rev
2521 if not branchmerge: # just jump to the new rev
2514 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2522 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2515 # If we're doing a partial update, we need to skip updating
2523 # If we're doing a partial update, we need to skip updating
2516 # the dirstate.
2524 # the dirstate.
2517 always = matcher is None or matcher.always()
2525 always = matcher is None or matcher.always()
2518 updatedirstate = always and not wc.isinmemory()
2526 updatedirstate = always and not wc.isinmemory()
2519 if updatedirstate:
2527 if updatedirstate:
2520 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2528 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2521 # note that we're in the middle of an update
2529 # note that we're in the middle of an update
2522 repo.vfs.write(b'updatestate', p2.hex())
2530 repo.vfs.write(b'updatestate', p2.hex())
2523
2531
2524 # Advertise fsmonitor when its presence could be useful.
2532 # Advertise fsmonitor when its presence could be useful.
2525 #
2533 #
2526 # We only advertise when performing an update from an empty working
2534 # We only advertise when performing an update from an empty working
2527 # directory. This typically only occurs during initial clone.
2535 # directory. This typically only occurs during initial clone.
2528 #
2536 #
2529 # We give users a mechanism to disable the warning in case it is
2537 # We give users a mechanism to disable the warning in case it is
2530 # annoying.
2538 # annoying.
2531 #
2539 #
2532 # We only allow on Linux and MacOS because that's where fsmonitor is
2540 # We only allow on Linux and MacOS because that's where fsmonitor is
2533 # considered stable.
2541 # considered stable.
2534 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2542 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2535 fsmonitorthreshold = repo.ui.configint(
2543 fsmonitorthreshold = repo.ui.configint(
2536 b'fsmonitor', b'warn_update_file_count'
2544 b'fsmonitor', b'warn_update_file_count'
2537 )
2545 )
2538 try:
2546 try:
2539 # avoid cycle: extensions -> cmdutil -> merge
2547 # avoid cycle: extensions -> cmdutil -> merge
2540 from . import extensions
2548 from . import extensions
2541
2549
2542 extensions.find(b'fsmonitor')
2550 extensions.find(b'fsmonitor')
2543 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2551 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2544 # We intentionally don't look at whether fsmonitor has disabled
2552 # We intentionally don't look at whether fsmonitor has disabled
2545 # itself because a) fsmonitor may have already printed a warning
2553 # itself because a) fsmonitor may have already printed a warning
2546 # b) we only care about the config state here.
2554 # b) we only care about the config state here.
2547 except KeyError:
2555 except KeyError:
2548 fsmonitorenabled = False
2556 fsmonitorenabled = False
2549
2557
2550 if (
2558 if (
2551 fsmonitorwarning
2559 fsmonitorwarning
2552 and not fsmonitorenabled
2560 and not fsmonitorenabled
2553 and p1.node() == nullid
2561 and p1.node() == nullid
2554 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2562 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2555 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2563 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2556 ):
2564 ):
2557 repo.ui.warn(
2565 repo.ui.warn(
2558 _(
2566 _(
2559 b'(warning: large working directory being used without '
2567 b'(warning: large working directory being used without '
2560 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2568 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2561 b'see "hg help -e fsmonitor")\n'
2569 b'see "hg help -e fsmonitor")\n'
2562 )
2570 )
2563 )
2571 )
2564
2572
2565 wantfiledata = updatedirstate and not branchmerge
2573 wantfiledata = updatedirstate and not branchmerge
2566 stats, getfiledata = applyupdates(
2574 stats, getfiledata = applyupdates(
2567 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2575 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2568 )
2576 )
2569
2577
2570 if updatedirstate:
2578 if updatedirstate:
2571 with repo.dirstate.parentchange():
2579 with repo.dirstate.parentchange():
2572 repo.setparents(fp1, fp2)
2580 repo.setparents(fp1, fp2)
2573 recordupdates(repo, actions, branchmerge, getfiledata)
2581 recordupdates(repo, actions, branchmerge, getfiledata)
2574 # update completed, clear state
2582 # update completed, clear state
2575 util.unlink(repo.vfs.join(b'updatestate'))
2583 util.unlink(repo.vfs.join(b'updatestate'))
2576
2584
2577 if not branchmerge:
2585 if not branchmerge:
2578 repo.dirstate.setbranch(p2.branch())
2586 repo.dirstate.setbranch(p2.branch())
2579
2587
2580 # If we're updating to a location, clean up any stale temporary includes
2588 # If we're updating to a location, clean up any stale temporary includes
2581 # (ex: this happens during hg rebase --abort).
2589 # (ex: this happens during hg rebase --abort).
2582 if not branchmerge:
2590 if not branchmerge:
2583 sparse.prunetemporaryincludes(repo)
2591 sparse.prunetemporaryincludes(repo)
2584
2592
2585 if updatedirstate:
2593 if updatedirstate:
2586 repo.hook(
2594 repo.hook(
2587 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2595 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2588 )
2596 )
2589 return stats
2597 return stats
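# Illustrative sketch (assumed usage, not from merge.py itself): the two common
# ways higher layers drive update().  `repo` and `node` are assumed to exist;
# the real entry points live in hg.py (hg.updaterepo(), hg.merge()).
#
#     # plain 'hg update REV' semantics: linear update, no merge
#     stats = update(repo, node, branchmerge=False, force=False)
#
#     # 'hg merge REV' semantics: merge the other head into the working copy
#     stats = update(repo, node, branchmerge=True, force=False,
#                    labels=[b'working copy', b'merge rev'])
#     if stats.unresolvedcount:
#         ...  # report unresolved conflicts and exit non-zero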
2590
2598
2591
2599
2592 def graft(
2600 def graft(
2593 repo,
2601 repo,
2594 ctx,
2602 ctx,
2595 base=None,
2603 base=None,
2596 labels=None,
2604 labels=None,
2597 keepparent=False,
2605 keepparent=False,
2598 keepconflictparent=False,
2606 keepconflictparent=False,
2599 wctx=None,
2607 wctx=None,
2600 ):
2608 ):
2601 """Do a graft-like merge.
2609 """Do a graft-like merge.
2602
2610
2603 This is a merge where the merge ancestor is chosen such that one
2611 This is a merge where the merge ancestor is chosen such that one
2604 or more changesets are grafted onto the current changeset. In
2612 or more changesets are grafted onto the current changeset. In
2605 addition to the merge, this fixes up the dirstate to include only
2613 addition to the merge, this fixes up the dirstate to include only
2606 a single parent (if keepparent is False) and tries to duplicate any
2614 a single parent (if keepparent is False) and tries to duplicate any
2607 renames/copies appropriately.
2615 renames/copies appropriately.
2608
2616
2609 ctx - changeset to rebase
2617 ctx - changeset to rebase
2610 base - merge base, or ctx.p1() if not specified
2618 base - merge base, or ctx.p1() if not specified
2611 labels - merge labels eg ['local', 'graft']
2619 labels - merge labels eg ['local', 'graft']
2612 keepparent - keep second parent if any
2620 keepparent - keep second parent if any
2613 keepconflictparent - if unresolved, keep parent used for the merge
2621 keepconflictparent - if unresolved, keep parent used for the merge
2614
2622
2615 """
2623 """
2616 # If we're grafting a descendant onto an ancestor, be sure to pass
2624 # If we're grafting a descendant onto an ancestor, be sure to pass
2617 # mergeancestor=True to update. This does two things: 1) allows the merge if
2625 # mergeancestor=True to update. This does two things: 1) allows the merge if
2618 # the destination is the same as the parent of the ctx (so we can use graft
2626 # the destination is the same as the parent of the ctx (so we can use graft
2619 # to copy commits), and 2) informs update that the incoming changes are
2627 # to copy commits), and 2) informs update that the incoming changes are
2620 # newer than the destination so it doesn't prompt about "remote changed foo
2628 # newer than the destination so it doesn't prompt about "remote changed foo
2621 # which local deleted".
2629 # which local deleted".
2622 wctx = wctx or repo[None]
2630 wctx = wctx or repo[None]
2623 pctx = wctx.p1()
2631 pctx = wctx.p1()
2624 base = base or ctx.p1()
2632 base = base or ctx.p1()
2625 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2633 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2626
2634
2627 stats = update(
2635 stats = update(
2628 repo,
2636 repo,
2629 ctx.node(),
2637 ctx.node(),
2630 True,
2638 True,
2631 True,
2639 True,
2632 base.node(),
2640 base.node(),
2633 mergeancestor=mergeancestor,
2641 mergeancestor=mergeancestor,
2634 labels=labels,
2642 labels=labels,
2635 wc=wctx,
2643 wc=wctx,
2636 )
2644 )
2637
2645
2638 if keepconflictparent and stats.unresolvedcount:
2646 if keepconflictparent and stats.unresolvedcount:
2639 pother = ctx.node()
2647 pother = ctx.node()
2640 else:
2648 else:
2641 pother = nullid
2649 pother = nullid
2642 parents = ctx.parents()
2650 parents = ctx.parents()
2643 if keepparent and len(parents) == 2 and base in parents:
2651 if keepparent and len(parents) == 2 and base in parents:
2644 parents.remove(base)
2652 parents.remove(base)
2645 pother = parents[0].node()
2653 pother = parents[0].node()
2646 # Never set both parents equal to each other
2654 # Never set both parents equal to each other
2647 if pother == pctx.node():
2655 if pother == pctx.node():
2648 pother = nullid
2656 pother = nullid
2649
2657
2650 if wctx.isinmemory():
2658 if wctx.isinmemory():
2651 wctx.setparents(pctx.node(), pother)
2659 wctx.setparents(pctx.node(), pother)
2652 # fix up dirstate for copies and renames
2660 # fix up dirstate for copies and renames
2653 copies.graftcopies(wctx, ctx, base)
2661 copies.graftcopies(wctx, ctx, base)
2654 else:
2662 else:
2655 with repo.dirstate.parentchange():
2663 with repo.dirstate.parentchange():
2656 repo.setparents(pctx.node(), pother)
2664 repo.setparents(pctx.node(), pother)
2657 repo.dirstate.write(repo.currenttransaction())
2665 repo.dirstate.write(repo.currenttransaction())
2658 # fix up dirstate for copies and renames
2666 # fix up dirstate for copies and renames
2659 copies.graftcopies(wctx, ctx, base)
2667 copies.graftcopies(wctx, ctx, base)
2660 return stats
2668 return stats
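# Illustrative sketch (assumed usage, not from merge.py itself): roughly how
# the graft command uses this helper.  `repo` and `ctx` (the changeset being
# grafted) are assumed to exist.
#
#     stats = graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])
#     if stats.unresolvedcount:
#         ...  # stop so the user can run 'hg resolve' then 'hg graft --continue'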
2661
2669
2662
2670
2663 def purge(
2671 def purge(
2664 repo,
2672 repo,
2665 matcher,
2673 matcher,
2666 ignored=False,
2674 ignored=False,
2667 removeemptydirs=True,
2675 removeemptydirs=True,
2668 removefiles=True,
2676 removefiles=True,
2669 abortonerror=False,
2677 abortonerror=False,
2670 noop=False,
2678 noop=False,
2671 ):
2679 ):
2672 """Purge the working directory of untracked files.
2680 """Purge the working directory of untracked files.
2673
2681
2674 ``matcher`` is a matcher configured to scan the working directory -
2682 ``matcher`` is a matcher configured to scan the working directory -
2675 potentially a subset.
2683 potentially a subset.
2676
2684
2677 ``ignored`` controls whether ignored files should also be purged.
2685 ``ignored`` controls whether ignored files should also be purged.
2678
2686
2679 ``removeemptydirs`` controls whether empty directories should be removed.
2687 ``removeemptydirs`` controls whether empty directories should be removed.
2680
2688
2681 ``removefiles`` controls whether files are removed.
2689 ``removefiles`` controls whether files are removed.
2682
2690
2683 ``abortonerror`` causes an exception to be raised if an error occurs
2691 ``abortonerror`` causes an exception to be raised if an error occurs
2684 deleting a file or directory.
2692 deleting a file or directory.
2685
2693
2686 ``noop`` controls whether to actually remove files. If True, nothing is
2694 ``noop`` controls whether to actually remove files. If True, nothing is
2687 removed; the paths that would have been removed are still returned.
2695 removed; the paths that would have been removed are still returned.
2688
2696
2689 Returns an iterable of relative paths in the working directory that were
2697 Returns an iterable of relative paths in the working directory that were
2690 or would be removed.
2698 or would be removed.
2691 """
2699 """
2692
2700
2693 def remove(removefn, path):
2701 def remove(removefn, path):
2694 try:
2702 try:
2695 removefn(path)
2703 removefn(path)
2696 except OSError:
2704 except OSError:
2697 m = _(b'%s cannot be removed') % path
2705 m = _(b'%s cannot be removed') % path
2698 if abortonerror:
2706 if abortonerror:
2699 raise error.Abort(m)
2707 raise error.Abort(m)
2700 else:
2708 else:
2701 repo.ui.warn(_(b'warning: %s\n') % m)
2709 repo.ui.warn(_(b'warning: %s\n') % m)
2702
2710
2703 # There's no API to copy a matcher. So mutate the passed matcher and
2711 # There's no API to copy a matcher. So mutate the passed matcher and
2704 # restore it when we're done.
2712 # restore it when we're done.
2705 oldtraversedir = matcher.traversedir
2713 oldtraversedir = matcher.traversedir
2706
2714
2707 res = []
2715 res = []
2708
2716
2709 try:
2717 try:
2710 if removeemptydirs:
2718 if removeemptydirs:
2711 directories = []
2719 directories = []
2712 matcher.traversedir = directories.append
2720 matcher.traversedir = directories.append
2713
2721
2714 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2722 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2715
2723
2716 if removefiles:
2724 if removefiles:
2717 for f in sorted(status.unknown + status.ignored):
2725 for f in sorted(status.unknown + status.ignored):
2718 if not noop:
2726 if not noop:
2719 repo.ui.note(_(b'removing file %s\n') % f)
2727 repo.ui.note(_(b'removing file %s\n') % f)
2720 remove(repo.wvfs.unlink, f)
2728 remove(repo.wvfs.unlink, f)
2721 res.append(f)
2729 res.append(f)
2722
2730
2723 if removeemptydirs:
2731 if removeemptydirs:
2724 for f in sorted(directories, reverse=True):
2732 for f in sorted(directories, reverse=True):
2725 if matcher(f) and not repo.wvfs.listdir(f):
2733 if matcher(f) and not repo.wvfs.listdir(f):
2726 if not noop:
2734 if not noop:
2727 repo.ui.note(_(b'removing directory %s\n') % f)
2735 repo.ui.note(_(b'removing directory %s\n') % f)
2728 remove(repo.wvfs.rmdir, f)
2736 remove(repo.wvfs.rmdir, f)
2729 res.append(f)
2737 res.append(f)
2730
2738
2731 return res
2739 return res
2732
2740
2733 finally:
2741 finally:
2734 matcher.traversedir = oldtraversedir
2742 matcher.traversedir = oldtraversedir
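# Illustrative sketch (assumed usage, not from merge.py itself): a dry run of
# purge() over the whole working directory.  `repo` is assumed to exist;
# matchmod.always() matches every path.
#
#     m = matchmod.always()
#     would_remove = purge(repo, m, ignored=False, noop=True)
#     for path in sorted(would_remove):
#         repo.ui.write(b'%s\n' % path)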