merge: use constants for merge state record types...
Gregory Szorc
r37127:a532b2f5 default
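The commit applies one mechanical change throughout merge.py: the single-byte record types used by the merge state file are given module-level names (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED, and so on), and every comparison and record construction switches from the bare byte literal to the named constant. A minimal before/after sketch of the pattern, using only identifiers that appear in the diff below:

    # Before: record types are "magic" byte literals scattered through the code.
    if rtype == 'L':
        self._local = bin(record)

    # After: the same byte values, defined once at module level with
    # descriptive names (see the RECORD_* block added near the top of the file).
    RECORD_LOCAL = b'L'

    if rtype == RECORD_LOCAL:
        self._local = bin(record)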
@@ -1,2113 +1,2133 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    copies,
    error,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepoutil,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

+# Merge state record types. See ``mergestate`` docs for more.
+RECORD_LOCAL = b'L'
+RECORD_OTHER = b'O'
+RECORD_MERGED = b'F'
+RECORD_CHANGEDELETE_CONFLICT = b'C'
+RECORD_MERGE_DRIVER_MERGE = b'D'
+RECORD_PATH_CONFLICT = b'P'
+RECORD_MERGE_DRIVER_STATE = b'm'
+RECORD_FILE_VALUES = b'f'
+RECORD_LABELS = b'l'
+RECORD_OVERRIDE = b't'
+RECORD_UNSUPPORTED_MANDATORY = b'X'
+RECORD_UNSUPPORTED_ADVISORY = b'x'
+
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
-            if rtype == 'L':
+            if rtype == RECORD_LOCAL:
                self._local = bin(record)
-            elif rtype == 'O':
+            elif rtype == RECORD_OTHER:
                self._other = bin(record)
-            elif rtype == 'm':
+            elif rtype == RECORD_MERGE_DRIVER_STATE:
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
-            elif rtype in 'FDCP':
+            elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
+                           RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
-            elif rtype == 'f':
+            elif rtype == RECORD_FILE_VALUES:
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
-            elif rtype == 'l':
+            elif rtype == RECORD_LABELS:
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
-            v1records.append(('O', mctx.hex()))
+            v1records.append((RECORD_OTHER, mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
-                if r[0] == 'F':
+                if r[0] == RECORD_MERGED:
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
-            if rec[0] == 'L':
+            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
-            elif rec[0] == 'F':
+            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
-                oldv2.add(('F', _droponode(rec[1])))
+                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
-                    records.append(('L', l[:-1]))
+                    records.append((RECORD_LOCAL, l[:-1]))
                else:
-                    records.append(('F', l[:-1]))
+                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off:off + 1]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
-                if rtype == 't':
+                if rtype == RECORD_OVERRIDE:
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records
308 @util.propertycache
323 @util.propertycache
309 def mergedriver(self):
324 def mergedriver(self):
310 # protect against the following:
325 # protect against the following:
311 # - A configures a malicious merge driver in their hgrc, then
326 # - A configures a malicious merge driver in their hgrc, then
312 # pauses the merge
327 # pauses the merge
313 # - A edits their hgrc to remove references to the merge driver
328 # - A edits their hgrc to remove references to the merge driver
314 # - A gives a copy of their entire repo, including .hg, to B
329 # - A gives a copy of their entire repo, including .hg, to B
315 # - B inspects .hgrc and finds it to be clean
330 # - B inspects .hgrc and finds it to be clean
316 # - B then continues the merge and the malicious merge driver
331 # - B then continues the merge and the malicious merge driver
317 # gets invoked
332 # gets invoked
318 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
333 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
319 if (self._readmergedriver is not None
334 if (self._readmergedriver is not None
320 and self._readmergedriver != configmergedriver):
335 and self._readmergedriver != configmergedriver):
321 raise error.ConfigError(
336 raise error.ConfigError(
322 _("merge driver changed since merge started"),
337 _("merge driver changed since merge started"),
323 hint=_("revert merge driver change or abort merge"))
338 hint=_("revert merge driver change or abort merge"))
324
339
325 return configmergedriver
340 return configmergedriver
326
341
327 @util.propertycache
342 @util.propertycache
328 def localctx(self):
343 def localctx(self):
329 if self._local is None:
344 if self._local is None:
330 msg = "localctx accessed but self._local isn't set"
345 msg = "localctx accessed but self._local isn't set"
331 raise error.ProgrammingError(msg)
346 raise error.ProgrammingError(msg)
332 return self._repo[self._local]
347 return self._repo[self._local]
333
348
334 @util.propertycache
349 @util.propertycache
335 def otherctx(self):
350 def otherctx(self):
336 if self._other is None:
351 if self._other is None:
337 msg = "otherctx accessed but self._other isn't set"
352 msg = "otherctx accessed but self._other isn't set"
338 raise error.ProgrammingError(msg)
353 raise error.ProgrammingError(msg)
339 return self._repo[self._other]
354 return self._repo[self._other]
340
355
341 def active(self):
356 def active(self):
342 """Whether mergestate is active.
357 """Whether mergestate is active.
343
358
344 Returns True if there appears to be mergestate. This is a rough proxy
359 Returns True if there appears to be mergestate. This is a rough proxy
345 for "is a merge in progress."
360 for "is a merge in progress."
346 """
361 """
347 # Check local variables before looking at filesystem for performance
362 # Check local variables before looking at filesystem for performance
348 # reasons.
363 # reasons.
349 return bool(self._local) or bool(self._state) or \
364 return bool(self._local) or bool(self._state) or \
350 self._repo.vfs.exists(self.statepathv1) or \
365 self._repo.vfs.exists(self.statepathv1) or \
351 self._repo.vfs.exists(self.statepathv2)
366 self._repo.vfs.exists(self.statepathv2)
352
367
353 def commit(self):
368 def commit(self):
354 """Write current state on disk (if necessary)"""
369 """Write current state on disk (if necessary)"""
355 if self._dirty:
370 if self._dirty:
356 records = self._makerecords()
371 records = self._makerecords()
357 self._writerecords(records)
372 self._writerecords(records)
358 self._dirty = False
373 self._dirty = False
359
374
360 def _makerecords(self):
375 def _makerecords(self):
361 records = []
376 records = []
362 records.append(('L', hex(self._local)))
377 records.append((RECORD_LOCAL, hex(self._local)))
363 records.append(('O', hex(self._other)))
378 records.append((RECORD_OTHER, hex(self._other)))
364 if self.mergedriver:
379 if self.mergedriver:
365 records.append(('m', '\0'.join([
380 records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
366 self.mergedriver, self._mdstate])))
381 self.mergedriver, self._mdstate])))
367 # Write out state items. In all cases, the value of the state map entry
382 # Write out state items. In all cases, the value of the state map entry
368 # is written as the contents of the record. The record type depends on
383 # is written as the contents of the record. The record type depends on
369 # the type of state that is stored, and capital-letter records are used
384 # the type of state that is stored, and capital-letter records are used
370 # to prevent older versions of Mercurial that do not support the feature
385 # to prevent older versions of Mercurial that do not support the feature
371 # from loading them.
386 # from loading them.
372 for filename, v in self._state.iteritems():
387 for filename, v in self._state.iteritems():
373 if v[0] == 'd':
388 if v[0] == 'd':
374 # Driver-resolved merge. These are stored in 'D' records.
389 # Driver-resolved merge. These are stored in 'D' records.
375 records.append(('D', '\0'.join([filename] + v)))
390 records.append((RECORD_MERGE_DRIVER_MERGE,
391 '\0'.join([filename] + v)))
376 elif v[0] in ('pu', 'pr'):
392 elif v[0] in ('pu', 'pr'):
377 # Path conflicts. These are stored in 'P' records. The current
393 # Path conflicts. These are stored in 'P' records. The current
378 # resolution state ('pu' or 'pr') is stored within the record.
394 # resolution state ('pu' or 'pr') is stored within the record.
379 records.append(('P', '\0'.join([filename] + v)))
395 records.append((RECORD_PATH_CONFLICT,
396 '\0'.join([filename] + v)))
380 elif v[1] == nullhex or v[6] == nullhex:
397 elif v[1] == nullhex or v[6] == nullhex:
381 # Change/Delete or Delete/Change conflicts. These are stored in
398 # Change/Delete or Delete/Change conflicts. These are stored in
382 # 'C' records. v[1] is the local file, and is nullhex when the
399 # 'C' records. v[1] is the local file, and is nullhex when the
383 # file is deleted locally ('dc'). v[6] is the remote file, and
400 # file is deleted locally ('dc'). v[6] is the remote file, and
384 # is nullhex when the file is deleted remotely ('cd').
401 # is nullhex when the file is deleted remotely ('cd').
385 records.append(('C', '\0'.join([filename] + v)))
402 records.append((RECORD_CHANGEDELETE_CONFLICT,
403 '\0'.join([filename] + v)))
386 else:
404 else:
387 # Normal files. These are stored in 'F' records.
405 # Normal files. These are stored in 'F' records.
388 records.append(('F', '\0'.join([filename] + v)))
406 records.append((RECORD_MERGED,
407 '\0'.join([filename] + v)))
389 for filename, extras in sorted(self._stateextras.iteritems()):
408 for filename, extras in sorted(self._stateextras.iteritems()):
390 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
409 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
391 extras.iteritems())
410 extras.iteritems())
392 records.append(('f', '%s\0%s' % (filename, rawextras)))
411 records.append((RECORD_FILE_VALUES,
412 '%s\0%s' % (filename, rawextras)))
393 if self._labels is not None:
413 if self._labels is not None:
394 labels = '\0'.join(self._labels)
414 labels = '\0'.join(self._labels)
395 records.append(('l', labels))
415 records.append((RECORD_LABELS, labels))
396 return records
416 return records
397
417
398 def _writerecords(self, records):
418 def _writerecords(self, records):
399 """Write current state on disk (both v1 and v2)"""
419 """Write current state on disk (both v1 and v2)"""
400 self._writerecordsv1(records)
420 self._writerecordsv1(records)
401 self._writerecordsv2(records)
421 self._writerecordsv2(records)
402
422
403 def _writerecordsv1(self, records):
423 def _writerecordsv1(self, records):
404 """Write current state on disk in a version 1 file"""
424 """Write current state on disk in a version 1 file"""
405 f = self._repo.vfs(self.statepathv1, 'wb')
425 f = self._repo.vfs(self.statepathv1, 'wb')
406 irecords = iter(records)
426 irecords = iter(records)
407 lrecords = next(irecords)
427 lrecords = next(irecords)
408 assert lrecords[0] == 'L'
428 assert lrecords[0] == RECORD_LOCAL
409 f.write(hex(self._local) + '\n')
429 f.write(hex(self._local) + '\n')
410 for rtype, data in irecords:
430 for rtype, data in irecords:
411 if rtype == 'F':
431 if rtype == RECORD_MERGED:
412 f.write('%s\n' % _droponode(data))
432 f.write('%s\n' % _droponode(data))
413 f.close()
433 f.close()
414
434
415 def _writerecordsv2(self, records):
435 def _writerecordsv2(self, records):
416 """Write current state on disk in a version 2 file
436 """Write current state on disk in a version 2 file
417
437
418 See the docstring for _readrecordsv2 for why we use 't'."""
438 See the docstring for _readrecordsv2 for why we use 't'."""
419 # these are the records that all version 2 clients can read
439 # these are the records that all version 2 clients can read
420 whitelist = 'LOF'
440 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
421 f = self._repo.vfs(self.statepathv2, 'wb')
441 f = self._repo.vfs(self.statepathv2, 'wb')
422 for key, data in records:
442 for key, data in records:
423 assert len(key) == 1
443 assert len(key) == 1
424 if key not in whitelist:
444 if key not in allowlist:
425 key, data = 't', '%s%s' % (key, data)
445 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
426 format = '>sI%is' % len(data)
446 format = '>sI%is' % len(data)
427 f.write(_pack(format, key, len(data), data))
447 f.write(_pack(format, key, len(data), data))
428 f.close()
448 f.close()
429
449
430 def add(self, fcl, fco, fca, fd):
450 def add(self, fcl, fco, fca, fd):
431 """add a new (potentially?) conflicting file the merge state
451 """add a new (potentially?) conflicting file the merge state
432 fcl: file context for local,
452 fcl: file context for local,
433 fco: file context for remote,
453 fco: file context for remote,
434 fca: file context for ancestors,
454 fca: file context for ancestors,
435 fd: file path of the resulting merge.
455 fd: file path of the resulting merge.
436
456
437 note: also write the local version to the `.hg/merge` directory.
457 note: also write the local version to the `.hg/merge` directory.
438 """
458 """
439 if fcl.isabsent():
459 if fcl.isabsent():
440 hash = nullhex
460 hash = nullhex
441 else:
461 else:
442 hash = hex(hashlib.sha1(fcl.path()).digest())
462 hash = hex(hashlib.sha1(fcl.path()).digest())
443 self._repo.vfs.write('merge/' + hash, fcl.data())
463 self._repo.vfs.write('merge/' + hash, fcl.data())
444 self._state[fd] = ['u', hash, fcl.path(),
464 self._state[fd] = ['u', hash, fcl.path(),
445 fca.path(), hex(fca.filenode()),
465 fca.path(), hex(fca.filenode()),
446 fco.path(), hex(fco.filenode()),
466 fco.path(), hex(fco.filenode()),
447 fcl.flags()]
467 fcl.flags()]
448 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
468 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
449 self._dirty = True
469 self._dirty = True
450
470
451 def addpath(self, path, frename, forigin):
471 def addpath(self, path, frename, forigin):
452 """add a new conflicting path to the merge state
472 """add a new conflicting path to the merge state
453 path: the path that conflicts
473 path: the path that conflicts
454 frename: the filename the conflicting file was renamed to
474 frename: the filename the conflicting file was renamed to
455 forigin: origin of the file ('l' or 'r' for local/remote)
475 forigin: origin of the file ('l' or 'r' for local/remote)
456 """
476 """
457 self._state[path] = ['pu', frename, forigin]
477 self._state[path] = ['pu', frename, forigin]
458 self._dirty = True
478 self._dirty = True
459
479
460 def __contains__(self, dfile):
480 def __contains__(self, dfile):
461 return dfile in self._state
481 return dfile in self._state
462
482
463 def __getitem__(self, dfile):
483 def __getitem__(self, dfile):
464 return self._state[dfile][0]
484 return self._state[dfile][0]
465
485
466 def __iter__(self):
486 def __iter__(self):
467 return iter(sorted(self._state))
487 return iter(sorted(self._state))
468
488
469 def files(self):
489 def files(self):
470 return self._state.keys()
490 return self._state.keys()
471
491
472 def mark(self, dfile, state):
492 def mark(self, dfile, state):
473 self._state[dfile][0] = state
493 self._state[dfile][0] = state
474 self._dirty = True
494 self._dirty = True
475
495
476 def mdstate(self):
496 def mdstate(self):
477 return self._mdstate
497 return self._mdstate
478
498
479 def unresolved(self):
499 def unresolved(self):
480 """Obtain the paths of unresolved files."""
500 """Obtain the paths of unresolved files."""
481
501
482 for f, entry in self._state.iteritems():
502 for f, entry in self._state.iteritems():
483 if entry[0] in ('u', 'pu'):
503 if entry[0] in ('u', 'pu'):
484 yield f
504 yield f
485
505
486 def driverresolved(self):
506 def driverresolved(self):
487 """Obtain the paths of driver-resolved files."""
507 """Obtain the paths of driver-resolved files."""
488
508
489 for f, entry in self._state.items():
509 for f, entry in self._state.items():
490 if entry[0] == 'd':
510 if entry[0] == 'd':
491 yield f
511 yield f
492
512
493 def extras(self, filename):
513 def extras(self, filename):
494 return self._stateextras.setdefault(filename, {})
514 return self._stateextras.setdefault(filename, {})
495
515
496 def _resolve(self, preresolve, dfile, wctx):
516 def _resolve(self, preresolve, dfile, wctx):
497 """rerun merge process for file path `dfile`"""
517 """rerun merge process for file path `dfile`"""
498 if self[dfile] in 'rd':
518 if self[dfile] in 'rd':
499 return True, 0
519 return True, 0
500 stateentry = self._state[dfile]
520 stateentry = self._state[dfile]
501 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
521 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
502 octx = self._repo[self._other]
522 octx = self._repo[self._other]
503 extras = self.extras(dfile)
523 extras = self.extras(dfile)
504 anccommitnode = extras.get('ancestorlinknode')
524 anccommitnode = extras.get('ancestorlinknode')
505 if anccommitnode:
525 if anccommitnode:
506 actx = self._repo[anccommitnode]
526 actx = self._repo[anccommitnode]
507 else:
527 else:
508 actx = None
528 actx = None
509 fcd = self._filectxorabsent(hash, wctx, dfile)
529 fcd = self._filectxorabsent(hash, wctx, dfile)
510 fco = self._filectxorabsent(onode, octx, ofile)
530 fco = self._filectxorabsent(onode, octx, ofile)
511 # TODO: move this to filectxorabsent
531 # TODO: move this to filectxorabsent
512 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
532 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
513 # "premerge" x flags
533 # "premerge" x flags
514 flo = fco.flags()
534 flo = fco.flags()
515 fla = fca.flags()
535 fla = fca.flags()
516 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
536 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
517 if fca.node() == nullid and flags != flo:
537 if fca.node() == nullid and flags != flo:
518 if preresolve:
538 if preresolve:
519 self._repo.ui.warn(
539 self._repo.ui.warn(
520 _('warning: cannot merge flags for %s '
540 _('warning: cannot merge flags for %s '
521 'without common ancestor - keeping local flags\n')
541 'without common ancestor - keeping local flags\n')
522 % afile)
542 % afile)
523 elif flags == fla:
543 elif flags == fla:
524 flags = flo
544 flags = flo
525 if preresolve:
545 if preresolve:
526 # restore local
546 # restore local
527 if hash != nullhex:
547 if hash != nullhex:
528 f = self._repo.vfs('merge/' + hash)
548 f = self._repo.vfs('merge/' + hash)
529 wctx[dfile].write(f.read(), flags)
549 wctx[dfile].write(f.read(), flags)
530 f.close()
550 f.close()
531 else:
551 else:
532 wctx[dfile].remove(ignoremissing=True)
552 wctx[dfile].remove(ignoremissing=True)
533 complete, r, deleted = filemerge.premerge(self._repo, wctx,
553 complete, r, deleted = filemerge.premerge(self._repo, wctx,
534 self._local, lfile, fcd,
554 self._local, lfile, fcd,
535 fco, fca,
555 fco, fca,
536 labels=self._labels)
556 labels=self._labels)
537 else:
557 else:
538 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
558 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
539 self._local, lfile, fcd,
559 self._local, lfile, fcd,
540 fco, fca,
560 fco, fca,
541 labels=self._labels)
561 labels=self._labels)
542 if r is None:
562 if r is None:
543 # no real conflict
563 # no real conflict
544 del self._state[dfile]
564 del self._state[dfile]
545 self._stateextras.pop(dfile, None)
565 self._stateextras.pop(dfile, None)
546 self._dirty = True
566 self._dirty = True
547 elif not r:
567 elif not r:
548 self.mark(dfile, 'r')
568 self.mark(dfile, 'r')
549
569
550 if complete:
570 if complete:
551 action = None
571 action = None
552 if deleted:
572 if deleted:
553 if fcd.isabsent():
573 if fcd.isabsent():
554 # dc: local picked. Need to drop if present, which may
574 # dc: local picked. Need to drop if present, which may
555 # happen on re-resolves.
575 # happen on re-resolves.
556 action = 'f'
576 action = 'f'
557 else:
577 else:
558 # cd: remote picked (or otherwise deleted)
578 # cd: remote picked (or otherwise deleted)
559 action = 'r'
579 action = 'r'
560 else:
580 else:
561 if fcd.isabsent(): # dc: remote picked
581 if fcd.isabsent(): # dc: remote picked
562 action = 'g'
582 action = 'g'
563 elif fco.isabsent(): # cd: local picked
583 elif fco.isabsent(): # cd: local picked
564 if dfile in self.localctx:
584 if dfile in self.localctx:
565 action = 'am'
585 action = 'am'
566 else:
586 else:
567 action = 'a'
587 action = 'a'
568 # else: regular merges (no action necessary)
588 # else: regular merges (no action necessary)
569 self._results[dfile] = r, action
589 self._results[dfile] = r, action
570
590
571 return complete, r
591 return complete, r
572
592
573 def _filectxorabsent(self, hexnode, ctx, f):
593 def _filectxorabsent(self, hexnode, ctx, f):
574 if hexnode == nullhex:
594 if hexnode == nullhex:
575 return filemerge.absentfilectx(ctx, f)
595 return filemerge.absentfilectx(ctx, f)
576 else:
596 else:
577 return ctx[f]
597 return ctx[f]
578
598
579 def preresolve(self, dfile, wctx):
599 def preresolve(self, dfile, wctx):
580 """run premerge process for dfile
600 """run premerge process for dfile
581
601
582 Returns whether the merge is complete, and the exit code."""
602 Returns whether the merge is complete, and the exit code."""
583 return self._resolve(True, dfile, wctx)
603 return self._resolve(True, dfile, wctx)
584
604
585 def resolve(self, dfile, wctx):
605 def resolve(self, dfile, wctx):
586 """run merge process (assuming premerge was run) for dfile
606 """run merge process (assuming premerge was run) for dfile
587
607
588 Returns the exit code of the merge."""
608 Returns the exit code of the merge."""
589 return self._resolve(False, dfile, wctx)[1]
609 return self._resolve(False, dfile, wctx)[1]
590
610
591 def counts(self):
611 def counts(self):
592 """return counts for updated, merged and removed files in this
612 """return counts for updated, merged and removed files in this
593 session"""
613 session"""
594 updated, merged, removed = 0, 0, 0
614 updated, merged, removed = 0, 0, 0
595 for r, action in self._results.itervalues():
615 for r, action in self._results.itervalues():
596 if r is None:
616 if r is None:
597 updated += 1
617 updated += 1
598 elif r == 0:
618 elif r == 0:
599 if action == 'r':
619 if action == 'r':
600 removed += 1
620 removed += 1
601 else:
621 else:
602 merged += 1
622 merged += 1
603 return updated, merged, removed
623 return updated, merged, removed
604
624
605 def unresolvedcount(self):
625 def unresolvedcount(self):
606 """get unresolved count for this merge (persistent)"""
626 """get unresolved count for this merge (persistent)"""
607 return len(list(self.unresolved()))
627 return len(list(self.unresolved()))
608
628
609 def actions(self):
629 def actions(self):
610 """return lists of actions to perform on the dirstate"""
630 """return lists of actions to perform on the dirstate"""
611 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
631 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
612 for f, (r, action) in self._results.iteritems():
632 for f, (r, action) in self._results.iteritems():
613 if action is not None:
633 if action is not None:
614 actions[action].append((f, None, "merge result"))
634 actions[action].append((f, None, "merge result"))
615 return actions
635 return actions
616
636
617 def recordactions(self):
637 def recordactions(self):
618 """record remove/add/get actions in the dirstate"""
638 """record remove/add/get actions in the dirstate"""
619 branchmerge = self._repo.dirstate.p2() != nullid
639 branchmerge = self._repo.dirstate.p2() != nullid
620 recordupdates(self._repo, self.actions(), branchmerge)
640 recordupdates(self._repo, self.actions(), branchmerge)
621
641
622 def queueremove(self, f):
642 def queueremove(self, f):
623 """queues a file to be removed from the dirstate
643 """queues a file to be removed from the dirstate
624
644
625 Meant for use by custom merge drivers."""
645 Meant for use by custom merge drivers."""
626 self._results[f] = 0, 'r'
646 self._results[f] = 0, 'r'
627
647
628 def queueadd(self, f):
648 def queueadd(self, f):
629 """queues a file to be added to the dirstate
649 """queues a file to be added to the dirstate
630
650
631 Meant for use by custom merge drivers."""
651 Meant for use by custom merge drivers."""
632 self._results[f] = 0, 'a'
652 self._results[f] = 0, 'a'
633
653
634 def queueget(self, f):
654 def queueget(self, f):
635 """queues a file to be marked modified in the dirstate
655 """queues a file to be marked modified in the dirstate
636
656
637 Meant for use by custom merge drivers."""
657 Meant for use by custom merge drivers."""
638 self._results[f] = 0, 'g'
658 self._results[f] = 0, 'g'
639
659
640 def _getcheckunknownconfig(repo, section, name):
660 def _getcheckunknownconfig(repo, section, name):
641 config = repo.ui.config(section, name)
661 config = repo.ui.config(section, name)
642 valid = ['abort', 'ignore', 'warn']
662 valid = ['abort', 'ignore', 'warn']
643 if config not in valid:
663 if config not in valid:
644 validstr = ', '.join(["'" + v + "'" for v in valid])
664 validstr = ', '.join(["'" + v + "'" for v in valid])
645 raise error.ConfigError(_("%s.%s not valid "
665 raise error.ConfigError(_("%s.%s not valid "
646 "('%s' is none of %s)")
666 "('%s' is none of %s)")
647 % (section, name, config, validstr))
667 % (section, name, config, validstr))
648 return config
668 return config
649
669
650 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
670 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
651 if wctx.isinmemory():
671 if wctx.isinmemory():
652 # Nothing to do in IMM because nothing in the "working copy" can be an
672 # Nothing to do in IMM because nothing in the "working copy" can be an
653 # unknown file.
673 # unknown file.
654 #
674 #
655 # Note that we should bail out here, not in ``_checkunknownfiles()``,
675 # Note that we should bail out here, not in ``_checkunknownfiles()``,
656 # because that function does other useful work.
676 # because that function does other useful work.
657 return False
677 return False
658
678
659 if f2 is None:
679 if f2 is None:
660 f2 = f
680 f2 = f
661 return (repo.wvfs.audit.check(f)
681 return (repo.wvfs.audit.check(f)
662 and repo.wvfs.isfileorlink(f)
682 and repo.wvfs.isfileorlink(f)
663 and repo.dirstate.normalize(f) not in repo.dirstate
683 and repo.dirstate.normalize(f) not in repo.dirstate
664 and mctx[f2].cmp(wctx[f]))
684 and mctx[f2].cmp(wctx[f]))
665
685
666 class _unknowndirschecker(object):
686 class _unknowndirschecker(object):
667 """
687 """
668 Look for any unknown files or directories that may have a path conflict
688 Look for any unknown files or directories that may have a path conflict
669 with a file. If any path prefix of the file exists as a file or link,
689 with a file. If any path prefix of the file exists as a file or link,
670 then it conflicts. If the file itself is a directory that contains any
690 then it conflicts. If the file itself is a directory that contains any
671 file that is not tracked, then it conflicts.
691 file that is not tracked, then it conflicts.
672
692
673 Returns the shortest path at which a conflict occurs, or None if there is
693 Returns the shortest path at which a conflict occurs, or None if there is
674 no conflict.
694 no conflict.
675 """
695 """
676 def __init__(self):
696 def __init__(self):
677 # A set of paths known to be good. This prevents repeated checking of
697 # A set of paths known to be good. This prevents repeated checking of
678 # dirs. It will be updated with any new dirs that are checked and found
698 # dirs. It will be updated with any new dirs that are checked and found
679 # to be safe.
699 # to be safe.
680 self._unknowndircache = set()
700 self._unknowndircache = set()
681
701
682 # A set of paths that are known to be absent. This prevents repeated
702 # A set of paths that are known to be absent. This prevents repeated
683 # checking of subdirectories that are known not to exist. It will be
703 # checking of subdirectories that are known not to exist. It will be
684 # updated with any new dirs that are checked and found to be absent.
704 # updated with any new dirs that are checked and found to be absent.
685 self._missingdircache = set()
705 self._missingdircache = set()
686
706
687 def __call__(self, repo, wctx, f):
707 def __call__(self, repo, wctx, f):
688 if wctx.isinmemory():
708 if wctx.isinmemory():
689 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
709 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
690 return False
710 return False
691
711
692 # Check for path prefixes that exist as unknown files.
712 # Check for path prefixes that exist as unknown files.
693 for p in reversed(list(util.finddirs(f))):
713 for p in reversed(list(util.finddirs(f))):
694 if p in self._missingdircache:
714 if p in self._missingdircache:
695 return
715 return
696 if p in self._unknowndircache:
716 if p in self._unknowndircache:
697 continue
717 continue
698 if repo.wvfs.audit.check(p):
718 if repo.wvfs.audit.check(p):
699 if (repo.wvfs.isfileorlink(p)
719 if (repo.wvfs.isfileorlink(p)
700 and repo.dirstate.normalize(p) not in repo.dirstate):
720 and repo.dirstate.normalize(p) not in repo.dirstate):
701 return p
721 return p
702 if not repo.wvfs.lexists(p):
722 if not repo.wvfs.lexists(p):
703 self._missingdircache.add(p)
723 self._missingdircache.add(p)
704 return
724 return
705 self._unknowndircache.add(p)
725 self._unknowndircache.add(p)
706
726
707 # Check if the file conflicts with a directory containing unknown files.
727 # Check if the file conflicts with a directory containing unknown files.
708 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
728 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
709 # Does the directory contain any files that are not in the dirstate?
729 # Does the directory contain any files that are not in the dirstate?
710 for p, dirs, files in repo.wvfs.walk(f):
730 for p, dirs, files in repo.wvfs.walk(f):
711 for fn in files:
731 for fn in files:
712 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
732 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
713 relf = repo.dirstate.normalize(relf, isknown=True)
733 relf = repo.dirstate.normalize(relf, isknown=True)
714 if relf not in repo.dirstate:
734 if relf not in repo.dirstate:
715 return f
735 return f
716 return None
736 return None
717
737
718 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
738 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
719 """
739 """
720 Considers any actions that care about the presence of conflicting unknown
740 Considers any actions that care about the presence of conflicting unknown
721 files. For some actions, the result is to abort; for others, it is to
741 files. For some actions, the result is to abort; for others, it is to
722 choose a different action.
742 choose a different action.
723 """
743 """
724 fileconflicts = set()
744 fileconflicts = set()
725 pathconflicts = set()
745 pathconflicts = set()
726 warnconflicts = set()
746 warnconflicts = set()
727 abortconflicts = set()
747 abortconflicts = set()
728 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
748 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
729 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
749 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
730 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
750 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
731 if not force:
751 if not force:
732 def collectconflicts(conflicts, config):
752 def collectconflicts(conflicts, config):
733 if config == 'abort':
753 if config == 'abort':
734 abortconflicts.update(conflicts)
754 abortconflicts.update(conflicts)
735 elif config == 'warn':
755 elif config == 'warn':
736 warnconflicts.update(conflicts)
756 warnconflicts.update(conflicts)
737
757
738 checkunknowndirs = _unknowndirschecker()
758 checkunknowndirs = _unknowndirschecker()
739 for f, (m, args, msg) in actions.iteritems():
759 for f, (m, args, msg) in actions.iteritems():
740 if m in ('c', 'dc'):
760 if m in ('c', 'dc'):
741 if _checkunknownfile(repo, wctx, mctx, f):
761 if _checkunknownfile(repo, wctx, mctx, f):
742 fileconflicts.add(f)
762 fileconflicts.add(f)
743 elif pathconfig and f not in wctx:
763 elif pathconfig and f not in wctx:
744 path = checkunknowndirs(repo, wctx, f)
764 path = checkunknowndirs(repo, wctx, f)
745 if path is not None:
765 if path is not None:
746 pathconflicts.add(path)
766 pathconflicts.add(path)
747 elif m == 'dg':
767 elif m == 'dg':
748 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
768 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
749 fileconflicts.add(f)
769 fileconflicts.add(f)
750
770
751 allconflicts = fileconflicts | pathconflicts
771 allconflicts = fileconflicts | pathconflicts
752 ignoredconflicts = set([c for c in allconflicts
772 ignoredconflicts = set([c for c in allconflicts
753 if repo.dirstate._ignore(c)])
773 if repo.dirstate._ignore(c)])
754 unknownconflicts = allconflicts - ignoredconflicts
774 unknownconflicts = allconflicts - ignoredconflicts
755 collectconflicts(ignoredconflicts, ignoredconfig)
775 collectconflicts(ignoredconflicts, ignoredconfig)
756 collectconflicts(unknownconflicts, unknownconfig)
776 collectconflicts(unknownconflicts, unknownconfig)
757 else:
777 else:
758 for f, (m, args, msg) in actions.iteritems():
778 for f, (m, args, msg) in actions.iteritems():
759 if m == 'cm':
779 if m == 'cm':
760 fl2, anc = args
780 fl2, anc = args
761 different = _checkunknownfile(repo, wctx, mctx, f)
781 different = _checkunknownfile(repo, wctx, mctx, f)
762 if repo.dirstate._ignore(f):
782 if repo.dirstate._ignore(f):
763 config = ignoredconfig
783 config = ignoredconfig
764 else:
784 else:
765 config = unknownconfig
785 config = unknownconfig
766
786
767 # The behavior when force is True is described by this table:
787 # The behavior when force is True is described by this table:
768 # config different mergeforce | action backup
788 # config different mergeforce | action backup
769 # * n * | get n
789 # * n * | get n
770 # * y y | merge -
790 # * y y | merge -
771 # abort y n | merge - (1)
791 # abort y n | merge - (1)
772 # warn y n | warn + get y
792 # warn y n | warn + get y
773 # ignore y n | get y
793 # ignore y n | get y
774 #
794 #
775 # (1) this is probably the wrong behavior here -- we should
795 # (1) this is probably the wrong behavior here -- we should
776 # probably abort, but some actions like rebases currently
796 # probably abort, but some actions like rebases currently
777 # don't like an abort happening in the middle of
797 # don't like an abort happening in the middle of
778 # merge.update.
798 # merge.update.
779 if not different:
799 if not different:
780 actions[f] = ('g', (fl2, False), "remote created")
800 actions[f] = ('g', (fl2, False), "remote created")
781 elif mergeforce or config == 'abort':
801 elif mergeforce or config == 'abort':
782 actions[f] = ('m', (f, f, None, False, anc),
802 actions[f] = ('m', (f, f, None, False, anc),
783 "remote differs from untracked local")
803 "remote differs from untracked local")
784 elif config == 'abort':
804 elif config == 'abort':
785 abortconflicts.add(f)
805 abortconflicts.add(f)
786 else:
806 else:
787 if config == 'warn':
807 if config == 'warn':
788 warnconflicts.add(f)
808 warnconflicts.add(f)
789 actions[f] = ('g', (fl2, True), "remote created")
809 actions[f] = ('g', (fl2, True), "remote created")
790
810
791 for f in sorted(abortconflicts):
811 for f in sorted(abortconflicts):
792 warn = repo.ui.warn
812 warn = repo.ui.warn
793 if f in pathconflicts:
813 if f in pathconflicts:
794 if repo.wvfs.isfileorlink(f):
814 if repo.wvfs.isfileorlink(f):
795 warn(_("%s: untracked file conflicts with directory\n") % f)
815 warn(_("%s: untracked file conflicts with directory\n") % f)
796 else:
816 else:
797 warn(_("%s: untracked directory conflicts with file\n") % f)
817 warn(_("%s: untracked directory conflicts with file\n") % f)
798 else:
818 else:
799 warn(_("%s: untracked file differs\n") % f)
819 warn(_("%s: untracked file differs\n") % f)
800 if abortconflicts:
820 if abortconflicts:
801 raise error.Abort(_("untracked files in working directory "
821 raise error.Abort(_("untracked files in working directory "
802 "differ from files in requested revision"))
822 "differ from files in requested revision"))
803
823
804 for f in sorted(warnconflicts):
824 for f in sorted(warnconflicts):
805 if repo.wvfs.isfileorlink(f):
825 if repo.wvfs.isfileorlink(f):
806 repo.ui.warn(_("%s: replacing untracked file\n") % f)
826 repo.ui.warn(_("%s: replacing untracked file\n") % f)
807 else:
827 else:
808 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
828 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
809
829
810 for f, (m, args, msg) in actions.iteritems():
830 for f, (m, args, msg) in actions.iteritems():
811 if m == 'c':
831 if m == 'c':
812 backup = (f in fileconflicts or f in pathconflicts or
832 backup = (f in fileconflicts or f in pathconflicts or
813 any(p in pathconflicts for p in util.finddirs(f)))
833 any(p in pathconflicts for p in util.finddirs(f)))
814 flags, = args
834 flags, = args
815 actions[f] = ('g', (flags, backup), msg)
835 actions[f] = ('g', (flags, backup), msg)
816
836
817 def _forgetremoved(wctx, mctx, branchmerge):
837 def _forgetremoved(wctx, mctx, branchmerge):
818 """
838 """
819 Forget removed files
839 Forget removed files
820
840
821 If we're jumping between revisions (as opposed to merging), and if
841 If we're jumping between revisions (as opposed to merging), and if
822 neither the working directory nor the target rev has the file,
842 neither the working directory nor the target rev has the file,
823 then we need to remove it from the dirstate, to prevent the
843 then we need to remove it from the dirstate, to prevent the
824 dirstate from listing the file when it is no longer in the
844 dirstate from listing the file when it is no longer in the
825 manifest.
845 manifest.
826
846
827 If we're merging, and the other revision has removed a file
847 If we're merging, and the other revision has removed a file
828 that is not present in the working directory, we need to mark it
848 that is not present in the working directory, we need to mark it
829 as removed.
849 as removed.
830 """
850 """
831
851
832 actions = {}
852 actions = {}
833 m = 'f'
853 m = 'f'
834 if branchmerge:
854 if branchmerge:
835 m = 'r'
855 m = 'r'
836 for f in wctx.deleted():
856 for f in wctx.deleted():
837 if f not in mctx:
857 if f not in mctx:
838 actions[f] = m, None, "forget deleted"
858 actions[f] = m, None, "forget deleted"
839
859
840 if not branchmerge:
860 if not branchmerge:
841 for f in wctx.removed():
861 for f in wctx.removed():
842 if f not in mctx:
862 if f not in mctx:
843 actions[f] = 'f', None, "forget removed"
863 actions[f] = 'f', None, "forget removed"
844
864
845 return actions
865 return actions
846
866
847 def _checkcollision(repo, wmf, actions):
867 def _checkcollision(repo, wmf, actions):
848 # build provisional merged manifest up
868 # build provisional merged manifest up
849 pmmf = set(wmf)
869 pmmf = set(wmf)
850
870
851 if actions:
871 if actions:
852 # k, dr, e and rd are no-op
872 # k, dr, e and rd are no-op
853 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
873 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
854 for f, args, msg in actions[m]:
874 for f, args, msg in actions[m]:
855 pmmf.add(f)
875 pmmf.add(f)
856 for f, args, msg in actions['r']:
876 for f, args, msg in actions['r']:
857 pmmf.discard(f)
877 pmmf.discard(f)
858 for f, args, msg in actions['dm']:
878 for f, args, msg in actions['dm']:
859 f2, flags = args
879 f2, flags = args
860 pmmf.discard(f2)
880 pmmf.discard(f2)
861 pmmf.add(f)
881 pmmf.add(f)
862 for f, args, msg in actions['dg']:
882 for f, args, msg in actions['dg']:
863 pmmf.add(f)
883 pmmf.add(f)
864 for f, args, msg in actions['m']:
884 for f, args, msg in actions['m']:
865 f1, f2, fa, move, anc = args
885 f1, f2, fa, move, anc = args
866 if move:
886 if move:
867 pmmf.discard(f1)
887 pmmf.discard(f1)
868 pmmf.add(f)
888 pmmf.add(f)
869
889
870 # check case-folding collision in provisional merged manifest
890 # check case-folding collision in provisional merged manifest
871 foldmap = {}
891 foldmap = {}
872 for f in pmmf:
892 for f in pmmf:
873 fold = util.normcase(f)
893 fold = util.normcase(f)
874 if fold in foldmap:
894 if fold in foldmap:
875 raise error.Abort(_("case-folding collision between %s and %s")
895 raise error.Abort(_("case-folding collision between %s and %s")
876 % (f, foldmap[fold]))
896 % (f, foldmap[fold]))
877 foldmap[fold] = f
897 foldmap[fold] = f
878
898
879 # check case-folding of directories
899 # check case-folding of directories
880 foldprefix = unfoldprefix = lastfull = ''
900 foldprefix = unfoldprefix = lastfull = ''
881 for fold, f in sorted(foldmap.items()):
901 for fold, f in sorted(foldmap.items()):
882 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
902 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
883 # the folded prefix matches but actual casing is different
903 # the folded prefix matches but actual casing is different
884 raise error.Abort(_("case-folding collision between "
904 raise error.Abort(_("case-folding collision between "
885 "%s and directory of %s") % (lastfull, f))
905 "%s and directory of %s") % (lastfull, f))
886 foldprefix = fold + '/'
906 foldprefix = fold + '/'
887 unfoldprefix = f + '/'
907 unfoldprefix = f + '/'
888 lastfull = f
908 lastfull = f
889
909
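The case-folding collision check above is easy to exercise outside Mercurial; in this sketch str.lower() stands in for util.normcase, which is a simplification.

def find_casefold_collisions(paths):
    """Yield (path, earlier_path) pairs whose names differ only by case."""
    seen = {}
    for p in sorted(paths):
        fold = p.lower()          # stand-in for util.normcase
        if fold in seen:
            yield p, seen[fold]
        else:
            seen[fold] = p

# list(find_casefold_collisions(['README', 'readme', 'src/a.py']))
# -> [('readme', 'README')]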
890 def driverpreprocess(repo, ms, wctx, labels=None):
910 def driverpreprocess(repo, ms, wctx, labels=None):
891 """run the preprocess step of the merge driver, if any
911 """run the preprocess step of the merge driver, if any
892
912
893 This is currently not implemented -- it's an extension point."""
913 This is currently not implemented -- it's an extension point."""
894 return True
914 return True
895
915
896 def driverconclude(repo, ms, wctx, labels=None):
916 def driverconclude(repo, ms, wctx, labels=None):
897 """run the conclude step of the merge driver, if any
917 """run the conclude step of the merge driver, if any
898
918
899 This is currently not implemented -- it's an extension point."""
919 This is currently not implemented -- it's an extension point."""
900 return True
920 return True
901
921
902 def _filesindirs(repo, manifest, dirs):
922 def _filesindirs(repo, manifest, dirs):
903 """
923 """
904 Generator that yields pairs of all the files in the manifest that are found
924 Generator that yields pairs of all the files in the manifest that are found
905 inside the directories listed in dirs, and which directory they are found
925 inside the directories listed in dirs, and which directory they are found
906 in.
926 in.
907 """
927 """
908 for f in manifest:
928 for f in manifest:
909 for p in util.finddirs(f):
929 for p in util.finddirs(f):
910 if p in dirs:
930 if p in dirs:
911 yield f, p
931 yield f, p
912 break
932 break
913
933
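For illustration, the same generator works over plain path strings; finddirs below is a local helper that yields the ancestor directories of a path, roughly what util.finddirs does.

def finddirs(path):
    """Yield ancestor directories, deepest first ('a/b/c' -> 'a/b', 'a')."""
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def filesindirs(manifest, dirs):
    """Yield (file, dir) for manifest files living under one of dirs."""
    for f in manifest:
        for p in finddirs(f):
            if p in dirs:
                yield f, p
                break

# list(filesindirs(['a/b/x', 'c/y', 'z'], {'a/b'})) -> [('a/b/x', 'a/b')]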
914 def checkpathconflicts(repo, wctx, mctx, actions):
934 def checkpathconflicts(repo, wctx, mctx, actions):
915 """
935 """
916 Check if any actions introduce path conflicts in the repository, updating
936 Check if any actions introduce path conflicts in the repository, updating
917 actions to record or handle the path conflict accordingly.
937 actions to record or handle the path conflict accordingly.
918 """
938 """
919 mf = wctx.manifest()
939 mf = wctx.manifest()
920
940
921 # The set of local files that conflict with a remote directory.
941 # The set of local files that conflict with a remote directory.
922 localconflicts = set()
942 localconflicts = set()
923
943
924 # The set of directories that conflict with a remote file, and so may cause
944 # The set of directories that conflict with a remote file, and so may cause
925 # conflicts if they still contain any files after the merge.
945 # conflicts if they still contain any files after the merge.
926 remoteconflicts = set()
946 remoteconflicts = set()
927
947
928 # The set of paths that appear as both a file and a directory in the
948 # The set of paths that appear as both a file and a directory in the
929 # remote manifest. These indicate an invalid remote manifest, which the
949 # remote manifest. These indicate an invalid remote manifest, which the
930 # working copy cannot be cleanly updated to.
950 # working copy cannot be cleanly updated to.
931 invalidconflicts = set()
951 invalidconflicts = set()
932
952
933 # The set of directories that contain files that are being created.
953 # The set of directories that contain files that are being created.
934 createdfiledirs = set()
954 createdfiledirs = set()
935
955
936 # The set of files deleted by all the actions.
956 # The set of files deleted by all the actions.
937 deletedfiles = set()
957 deletedfiles = set()
938
958
939 for f, (m, args, msg) in actions.items():
959 for f, (m, args, msg) in actions.items():
940 if m in ('c', 'dc', 'm', 'cm'):
960 if m in ('c', 'dc', 'm', 'cm'):
941 # This action may create a new local file.
961 # This action may create a new local file.
942 createdfiledirs.update(util.finddirs(f))
962 createdfiledirs.update(util.finddirs(f))
943 if mf.hasdir(f):
963 if mf.hasdir(f):
944 # The file aliases a local directory. This might be ok if all
964 # The file aliases a local directory. This might be ok if all
945 # the files in the local directory are being deleted. This
965 # the files in the local directory are being deleted. This
946 # will be checked once we know what all the deleted files are.
966 # will be checked once we know what all the deleted files are.
947 remoteconflicts.add(f)
967 remoteconflicts.add(f)
948 # Track the names of all deleted files.
968 # Track the names of all deleted files.
949 if m == 'r':
969 if m == 'r':
950 deletedfiles.add(f)
970 deletedfiles.add(f)
951 if m == 'm':
971 if m == 'm':
952 f1, f2, fa, move, anc = args
972 f1, f2, fa, move, anc = args
953 if move:
973 if move:
954 deletedfiles.add(f1)
974 deletedfiles.add(f1)
955 if m == 'dm':
975 if m == 'dm':
956 f2, flags = args
976 f2, flags = args
957 deletedfiles.add(f2)
977 deletedfiles.add(f2)
958
978
959 # Check all directories that contain created files for path conflicts.
979 # Check all directories that contain created files for path conflicts.
960 for p in createdfiledirs:
980 for p in createdfiledirs:
961 if p in mf:
981 if p in mf:
962 if p in mctx:
982 if p in mctx:
963 # A file is in a directory which aliases both a local
983 # A file is in a directory which aliases both a local
964 # and a remote file. This is an internal inconsistency
984 # and a remote file. This is an internal inconsistency
965 # within the remote manifest.
985 # within the remote manifest.
966 invalidconflicts.add(p)
986 invalidconflicts.add(p)
967 else:
987 else:
968 # A file is in a directory which aliases a local file.
988 # A file is in a directory which aliases a local file.
969 # We will need to rename the local file.
989 # We will need to rename the local file.
970 localconflicts.add(p)
990 localconflicts.add(p)
971 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
991 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
972 # The file is in a directory which aliases a remote file.
992 # The file is in a directory which aliases a remote file.
973 # This is an internal inconsistency within the remote
993 # This is an internal inconsistency within the remote
974 # manifest.
994 # manifest.
975 invalidconflicts.add(p)
995 invalidconflicts.add(p)
976
996
977 # Rename all local conflicting files that have not been deleted.
997 # Rename all local conflicting files that have not been deleted.
978 for p in localconflicts:
998 for p in localconflicts:
979 if p not in deletedfiles:
999 if p not in deletedfiles:
980 ctxname = bytes(wctx).rstrip('+')
1000 ctxname = bytes(wctx).rstrip('+')
981 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1001 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
982 actions[pnew] = ('pr', (p,), "local path conflict")
1002 actions[pnew] = ('pr', (p,), "local path conflict")
983 actions[p] = ('p', (pnew, 'l'), "path conflict")
1003 actions[p] = ('p', (pnew, 'l'), "path conflict")
984
1004
985 if remoteconflicts:
1005 if remoteconflicts:
986 # Check if all files in the conflicting directories have been removed.
1006 # Check if all files in the conflicting directories have been removed.
987 ctxname = bytes(mctx).rstrip('+')
1007 ctxname = bytes(mctx).rstrip('+')
988 for f, p in _filesindirs(repo, mf, remoteconflicts):
1008 for f, p in _filesindirs(repo, mf, remoteconflicts):
989 if f not in deletedfiles:
1009 if f not in deletedfiles:
990 m, args, msg = actions[p]
1010 m, args, msg = actions[p]
991 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1011 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
992 if m in ('dc', 'm'):
1012 if m in ('dc', 'm'):
993 # Action was merge, just update target.
1013 # Action was merge, just update target.
994 actions[pnew] = (m, args, msg)
1014 actions[pnew] = (m, args, msg)
995 else:
1015 else:
996 # Action was create, change to renamed get action.
1016 # Action was create, change to renamed get action.
997 fl = args[0]
1017 fl = args[0]
998 actions[pnew] = ('dg', (p, fl), "remote path conflict")
1018 actions[pnew] = ('dg', (p, fl), "remote path conflict")
999 actions[p] = ('p', (pnew, 'r'), "path conflict")
1019 actions[p] = ('p', (pnew, 'r'), "path conflict")
1000 remoteconflicts.remove(p)
1020 remoteconflicts.remove(p)
1001 break
1021 break
1002
1022
1003 if invalidconflicts:
1023 if invalidconflicts:
1004 for p in invalidconflicts:
1024 for p in invalidconflicts:
1005 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1025 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1006 raise error.Abort(_("destination manifest contains path conflicts"))
1026 raise error.Abort(_("destination manifest contains path conflicts"))
1007
1027
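A stripped-down sketch of the classification above, working on plain sets of paths instead of manifests and actions; it only sorts conflicts into the three buckets and omits the renaming that follows.

def classify_path_conflicts(localfiles, remotefiles, createdfiles):
    """Bucket path conflicts the way checkpathconflicts does, on plain sets.

    localfiles: paths tracked locally; remotefiles: paths on the remote
    side; createdfiles: remote paths the update would create locally.
    """
    def parent_dirs(path):
        parts = path.split('/')
        return {'/'.join(parts[:i]) for i in range(1, len(parts))}

    localdirs = set()
    for f in localfiles:
        localdirs |= parent_dirs(f)

    localconflicts, remoteconflicts, invalidconflicts = set(), set(), set()
    createddirs = set()
    for f in createdfiles:
        createddirs |= parent_dirs(f)
        if f in localdirs:
            remoteconflicts.add(f)   # new file aliases an existing local directory
    for p in createddirs:
        if p in localfiles and p in remotefiles:
            invalidconflicts.add(p)  # remote has p as both a file and a directory
        elif p in localfiles:
            localconflicts.add(p)    # a local file stands where a new directory goes
        elif p in createdfiles:
            invalidconflicts.add(p)  # remote creates p as both a file and a directory
    return localconflicts, remoteconflicts, invalidconflicts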
1008 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1028 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1009 acceptremote, followcopies, forcefulldiff=False):
1029 acceptremote, followcopies, forcefulldiff=False):
1010 """
1030 """
1011 Merge wctx and p2 with ancestor pa and generate merge action list
1031 Merge wctx and p2 with ancestor pa and generate merge action list
1012
1032
1013 branchmerge and force are as passed in to update
1033 branchmerge and force are as passed in to update
1014 matcher = matcher to filter file lists
1034 matcher = matcher to filter file lists
1015 acceptremote = accept the incoming changes without prompting
1035 acceptremote = accept the incoming changes without prompting
1016 """
1036 """
1017 if matcher is not None and matcher.always():
1037 if matcher is not None and matcher.always():
1018 matcher = None
1038 matcher = None
1019
1039
1020 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1040 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1021
1041
1022 # manifests fetched in order are going to be faster, so prime the caches
1042 # manifests fetched in order are going to be faster, so prime the caches
1023 [x.manifest() for x in
1043 [x.manifest() for x in
1024 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1044 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1025
1045
1026 if followcopies:
1046 if followcopies:
1027 ret = copies.mergecopies(repo, wctx, p2, pa)
1047 ret = copies.mergecopies(repo, wctx, p2, pa)
1028 copy, movewithdir, diverge, renamedelete, dirmove = ret
1048 copy, movewithdir, diverge, renamedelete, dirmove = ret
1029
1049
1030 boolbm = pycompat.bytestr(bool(branchmerge))
1050 boolbm = pycompat.bytestr(bool(branchmerge))
1031 boolf = pycompat.bytestr(bool(force))
1051 boolf = pycompat.bytestr(bool(force))
1032 boolm = pycompat.bytestr(bool(matcher))
1052 boolm = pycompat.bytestr(bool(matcher))
1033 repo.ui.note(_("resolving manifests\n"))
1053 repo.ui.note(_("resolving manifests\n"))
1034 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1054 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1035 % (boolbm, boolf, boolm))
1055 % (boolbm, boolf, boolm))
1036 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1056 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1037
1057
1038 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1058 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1039 copied = set(copy.values())
1059 copied = set(copy.values())
1040 copied.update(movewithdir.values())
1060 copied.update(movewithdir.values())
1041
1061
1042 if '.hgsubstate' in m1:
1062 if '.hgsubstate' in m1:
1043 # check whether sub state is modified
1063 # check whether sub state is modified
1044 if any(wctx.sub(s).dirty() for s in wctx.substate):
1064 if any(wctx.sub(s).dirty() for s in wctx.substate):
1045 m1['.hgsubstate'] = modifiednodeid
1065 m1['.hgsubstate'] = modifiednodeid
1046
1066
1047 # Don't use m2-vs-ma optimization if:
1067 # Don't use m2-vs-ma optimization if:
1048 # - ma is the same as m1 or m2, which we're just going to diff again later
1068 # - ma is the same as m1 or m2, which we're just going to diff again later
1049 # - The caller specifically asks for a full diff, which is useful during bid
1069 # - The caller specifically asks for a full diff, which is useful during bid
1050 # merge.
1070 # merge.
1051 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1071 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1052 # Identify which files are relevant to the merge, so we can limit the
1072 # Identify which files are relevant to the merge, so we can limit the
1053 # total m1-vs-m2 diff to just those files. This has significant
1073 # total m1-vs-m2 diff to just those files. This has significant
1054 # performance benefits in large repositories.
1074 # performance benefits in large repositories.
1055 relevantfiles = set(ma.diff(m2).keys())
1075 relevantfiles = set(ma.diff(m2).keys())
1056
1076
1057 # For copied and moved files, we need to add the source file too.
1077 # For copied and moved files, we need to add the source file too.
1058 for copykey, copyvalue in copy.iteritems():
1078 for copykey, copyvalue in copy.iteritems():
1059 if copyvalue in relevantfiles:
1079 if copyvalue in relevantfiles:
1060 relevantfiles.add(copykey)
1080 relevantfiles.add(copykey)
1061 for movedirkey in movewithdir:
1081 for movedirkey in movewithdir:
1062 relevantfiles.add(movedirkey)
1082 relevantfiles.add(movedirkey)
1063 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1083 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1064 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1084 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1065
1085
1066 diff = m1.diff(m2, match=matcher)
1086 diff = m1.diff(m2, match=matcher)
1067
1087
1068 if matcher is None:
1088 if matcher is None:
1069 matcher = matchmod.always('', '')
1089 matcher = matchmod.always('', '')
1070
1090
1071 actions = {}
1091 actions = {}
1072 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1092 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1073 if n1 and n2: # file exists on both local and remote side
1093 if n1 and n2: # file exists on both local and remote side
1074 if f not in ma:
1094 if f not in ma:
1075 fa = copy.get(f, None)
1095 fa = copy.get(f, None)
1076 if fa is not None:
1096 if fa is not None:
1077 actions[f] = ('m', (f, f, fa, False, pa.node()),
1097 actions[f] = ('m', (f, f, fa, False, pa.node()),
1078 "both renamed from " + fa)
1098 "both renamed from " + fa)
1079 else:
1099 else:
1080 actions[f] = ('m', (f, f, None, False, pa.node()),
1100 actions[f] = ('m', (f, f, None, False, pa.node()),
1081 "both created")
1101 "both created")
1082 else:
1102 else:
1083 a = ma[f]
1103 a = ma[f]
1084 fla = ma.flags(f)
1104 fla = ma.flags(f)
1085 nol = 'l' not in fl1 + fl2 + fla
1105 nol = 'l' not in fl1 + fl2 + fla
1086 if n2 == a and fl2 == fla:
1106 if n2 == a and fl2 == fla:
1087 actions[f] = ('k', (), "remote unchanged")
1107 actions[f] = ('k', (), "remote unchanged")
1088 elif n1 == a and fl1 == fla: # local unchanged - use remote
1108 elif n1 == a and fl1 == fla: # local unchanged - use remote
1089 if n1 == n2: # optimization: keep local content
1109 if n1 == n2: # optimization: keep local content
1090 actions[f] = ('e', (fl2,), "update permissions")
1110 actions[f] = ('e', (fl2,), "update permissions")
1091 else:
1111 else:
1092 actions[f] = ('g', (fl2, False), "remote is newer")
1112 actions[f] = ('g', (fl2, False), "remote is newer")
1093 elif nol and n2 == a: # remote only changed 'x'
1113 elif nol and n2 == a: # remote only changed 'x'
1094 actions[f] = ('e', (fl2,), "update permissions")
1114 actions[f] = ('e', (fl2,), "update permissions")
1095 elif nol and n1 == a: # local only changed 'x'
1115 elif nol and n1 == a: # local only changed 'x'
1096 actions[f] = ('g', (fl1, False), "remote is newer")
1116 actions[f] = ('g', (fl1, False), "remote is newer")
1097 else: # both changed something
1117 else: # both changed something
1098 actions[f] = ('m', (f, f, f, False, pa.node()),
1118 actions[f] = ('m', (f, f, f, False, pa.node()),
1099 "versions differ")
1119 "versions differ")
1100 elif n1: # file exists only on local side
1120 elif n1: # file exists only on local side
1101 if f in copied:
1121 if f in copied:
1102 pass # we'll deal with it on m2 side
1122 pass # we'll deal with it on m2 side
1103 elif f in movewithdir: # directory rename, move local
1123 elif f in movewithdir: # directory rename, move local
1104 f2 = movewithdir[f]
1124 f2 = movewithdir[f]
1105 if f2 in m2:
1125 if f2 in m2:
1106 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1126 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1107 "remote directory rename, both created")
1127 "remote directory rename, both created")
1108 else:
1128 else:
1109 actions[f2] = ('dm', (f, fl1),
1129 actions[f2] = ('dm', (f, fl1),
1110 "remote directory rename - move from " + f)
1130 "remote directory rename - move from " + f)
1111 elif f in copy:
1131 elif f in copy:
1112 f2 = copy[f]
1132 f2 = copy[f]
1113 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1133 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1114 "local copied/moved from " + f2)
1134 "local copied/moved from " + f2)
1115 elif f in ma: # clean, a different, no remote
1135 elif f in ma: # clean, a different, no remote
1116 if n1 != ma[f]:
1136 if n1 != ma[f]:
1117 if acceptremote:
1137 if acceptremote:
1118 actions[f] = ('r', None, "remote delete")
1138 actions[f] = ('r', None, "remote delete")
1119 else:
1139 else:
1120 actions[f] = ('cd', (f, None, f, False, pa.node()),
1140 actions[f] = ('cd', (f, None, f, False, pa.node()),
1121 "prompt changed/deleted")
1141 "prompt changed/deleted")
1122 elif n1 == addednodeid:
1142 elif n1 == addednodeid:
1123 # This extra 'a' is added by the working copy manifest to mark
1143 # This extra 'a' is added by the working copy manifest to mark
1124 # the file as locally added. We should forget it instead of
1144 # the file as locally added. We should forget it instead of
1125 # deleting it.
1145 # deleting it.
1126 actions[f] = ('f', None, "remote deleted")
1146 actions[f] = ('f', None, "remote deleted")
1127 else:
1147 else:
1128 actions[f] = ('r', None, "other deleted")
1148 actions[f] = ('r', None, "other deleted")
1129 elif n2: # file exists only on remote side
1149 elif n2: # file exists only on remote side
1130 if f in copied:
1150 if f in copied:
1131 pass # we'll deal with it on m1 side
1151 pass # we'll deal with it on m1 side
1132 elif f in movewithdir:
1152 elif f in movewithdir:
1133 f2 = movewithdir[f]
1153 f2 = movewithdir[f]
1134 if f2 in m1:
1154 if f2 in m1:
1135 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1155 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1136 "local directory rename, both created")
1156 "local directory rename, both created")
1137 else:
1157 else:
1138 actions[f2] = ('dg', (f, fl2),
1158 actions[f2] = ('dg', (f, fl2),
1139 "local directory rename - get from " + f)
1159 "local directory rename - get from " + f)
1140 elif f in copy:
1160 elif f in copy:
1141 f2 = copy[f]
1161 f2 = copy[f]
1142 if f2 in m2:
1162 if f2 in m2:
1143 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1163 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1144 "remote copied from " + f2)
1164 "remote copied from " + f2)
1145 else:
1165 else:
1146 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1166 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1147 "remote moved from " + f2)
1167 "remote moved from " + f2)
1148 elif f not in ma:
1168 elif f not in ma:
1149 # local unknown, remote created: the logic is described by the
1169 # local unknown, remote created: the logic is described by the
1150 # following table:
1170 # following table:
1151 #
1171 #
1152 # force branchmerge different | action
1172 # force branchmerge different | action
1153 # n * * | create
1173 # n * * | create
1154 # y n * | create
1174 # y n * | create
1155 # y y n | create
1175 # y y n | create
1156 # y y y | merge
1176 # y y y | merge
1157 #
1177 #
1158 # Checking whether the files are different is expensive, so we
1178 # Checking whether the files are different is expensive, so we
1159 # don't do that when we can avoid it.
1179 # don't do that when we can avoid it.
1160 if not force:
1180 if not force:
1161 actions[f] = ('c', (fl2,), "remote created")
1181 actions[f] = ('c', (fl2,), "remote created")
1162 elif not branchmerge:
1182 elif not branchmerge:
1163 actions[f] = ('c', (fl2,), "remote created")
1183 actions[f] = ('c', (fl2,), "remote created")
1164 else:
1184 else:
1165 actions[f] = ('cm', (fl2, pa.node()),
1185 actions[f] = ('cm', (fl2, pa.node()),
1166 "remote created, get or merge")
1186 "remote created, get or merge")
1167 elif n2 != ma[f]:
1187 elif n2 != ma[f]:
1168 df = None
1188 df = None
1169 for d in dirmove:
1189 for d in dirmove:
1170 if f.startswith(d):
1190 if f.startswith(d):
1171 # new file added in a directory that was moved
1191 # new file added in a directory that was moved
1172 df = dirmove[d] + f[len(d):]
1192 df = dirmove[d] + f[len(d):]
1173 break
1193 break
1174 if df is not None and df in m1:
1194 if df is not None and df in m1:
1175 actions[df] = ('m', (df, f, f, False, pa.node()),
1195 actions[df] = ('m', (df, f, f, False, pa.node()),
1176 "local directory rename - respect move from " + f)
1196 "local directory rename - respect move from " + f)
1177 elif acceptremote:
1197 elif acceptremote:
1178 actions[f] = ('c', (fl2,), "remote recreating")
1198 actions[f] = ('c', (fl2,), "remote recreating")
1179 else:
1199 else:
1180 actions[f] = ('dc', (None, f, f, False, pa.node()),
1200 actions[f] = ('dc', (None, f, f, False, pa.node()),
1181 "prompt deleted/changed")
1201 "prompt deleted/changed")
1182
1202
1183 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1203 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1184 # If we are merging, look for path conflicts.
1204 # If we are merging, look for path conflicts.
1185 checkpathconflicts(repo, wctx, p2, actions)
1205 checkpathconflicts(repo, wctx, p2, actions)
1186
1206
1187 return actions, diverge, renamedelete
1207 return actions, diverge, renamedelete
1188
1208
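The heart of the per-file logic above reduces to a three-way comparison of node ids; the sketch below covers only the common branches and deliberately ignores copies, directory renames, flags and the force/branchmerge table.

def decide(local, remote, ancestor):
    """Pick an action code for one file from its node on each side (None = absent).

    Mirrors the common branches of manifestmerge: k=keep, g=get remote,
    m=merge, r=remove, c=create, cd/dc=change/delete prompts.
    """
    if local is not None and remote is not None:
        if remote == ancestor:
            return 'k'                  # remote unchanged: keep local
        if local == ancestor:
            return 'g'                  # local unchanged: take remote
        return 'm'                      # both changed: real merge
    if local is not None:               # file only exists locally
        if ancestor is None:
            return 'k'                  # locally added, nothing to do
        return 'r' if local == ancestor else 'cd'    # remote deleted it
    if remote is not None:              # file only exists remotely
        if ancestor is None:
            return 'c'                  # remotely added: create it
        return None if remote == ancestor else 'dc'  # locally deleted it
    return None

# decide('n1', 'n2', 'n1') -> 'g';  decide('n1', None, 'n1') -> 'r'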
1189 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1209 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1190 """Resolves false conflicts where the nodeid changed but the content
1210 """Resolves false conflicts where the nodeid changed but the content
1191 remained the same."""
1211 remained the same."""
1192 # We force a copy of actions.items() because we're going to mutate
1212 # We force a copy of actions.items() because we're going to mutate
1193 # actions as we resolve trivial conflicts.
1213 # actions as we resolve trivial conflicts.
1194 for f, (m, args, msg) in list(actions.items()):
1214 for f, (m, args, msg) in list(actions.items()):
1195 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1215 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1196 # local did change but ended up with same content
1216 # local did change but ended up with same content
1197 actions[f] = 'r', None, "prompt same"
1217 actions[f] = 'r', None, "prompt same"
1198 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1218 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1199 # remote did change but ended up with same content
1219 # remote did change but ended up with same content
1200 del actions[f] # don't get = keep local deleted
1220 del actions[f] # don't get = keep local deleted
1201
1221
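What makes a conflict "false" is clearest with concrete content; in this sketch plain content dictionaries stand in for the cmp() calls on file contexts.

def resolve_trivial(actions, localdata, remotedata, ancestordata):
    """Drop change/delete prompts whose 'changed' side matches the ancestor.

    actions maps filename -> action code ('cd' or 'dc' here); the *data
    dicts map filename -> file content, with absent files simply missing.
    """
    for f, action in list(actions.items()):
        if (action == 'cd' and f in ancestordata
                and localdata.get(f) == ancestordata[f]):
            actions[f] = 'r'    # local "change" is content-identical: just remove
        elif (action == 'dc' and f in ancestordata
                and remotedata.get(f) == ancestordata[f]):
            del actions[f]      # remote "change" is content-identical: keep deletion
    return actions

# resolve_trivial({'a': 'cd'}, {'a': 'x'}, {}, {'a': 'x'}) -> {'a': 'r'}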
1202 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1222 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1203 acceptremote, followcopies, matcher=None,
1223 acceptremote, followcopies, matcher=None,
1204 mergeforce=False):
1224 mergeforce=False):
1205 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1225 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1206 # Avoid cycle.
1226 # Avoid cycle.
1207 from . import sparse
1227 from . import sparse
1208
1228
1209 if len(ancestors) == 1: # default
1229 if len(ancestors) == 1: # default
1210 actions, diverge, renamedelete = manifestmerge(
1230 actions, diverge, renamedelete = manifestmerge(
1211 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1231 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1212 acceptremote, followcopies)
1232 acceptremote, followcopies)
1213 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1233 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1214
1234
1215 else: # only when merge.preferancestor=* - the default
1235 else: # only when merge.preferancestor=* - the default
1216 repo.ui.note(
1236 repo.ui.note(
1217 _("note: merging %s and %s using bids from ancestors %s\n") %
1237 _("note: merging %s and %s using bids from ancestors %s\n") %
1218 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1238 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1219 for anc in ancestors)))
1239 for anc in ancestors)))
1220
1240
1221 # Call for bids
1241 # Call for bids
1222 fbids = {} # mapping filename to bids (action method to list of actions)
1242 fbids = {} # mapping filename to bids (action method to list of actions)
1223 diverge, renamedelete = None, None
1243 diverge, renamedelete = None, None
1224 for ancestor in ancestors:
1244 for ancestor in ancestors:
1225 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1245 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1226 actions, diverge1, renamedelete1 = manifestmerge(
1246 actions, diverge1, renamedelete1 = manifestmerge(
1227 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1247 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1228 acceptremote, followcopies, forcefulldiff=True)
1248 acceptremote, followcopies, forcefulldiff=True)
1229 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1249 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1230
1250
1231 # Track the shortest set of warnings on the theory that bid
1251 # Track the shortest set of warnings on the theory that bid
1232 # merge will correctly incorporate more information
1252 # merge will correctly incorporate more information
1233 if diverge is None or len(diverge1) < len(diverge):
1253 if diverge is None or len(diverge1) < len(diverge):
1234 diverge = diverge1
1254 diverge = diverge1
1235 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1255 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1236 renamedelete = renamedelete1
1256 renamedelete = renamedelete1
1237
1257
1238 for f, a in sorted(actions.iteritems()):
1258 for f, a in sorted(actions.iteritems()):
1239 m, args, msg = a
1259 m, args, msg = a
1240 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1260 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1241 if f in fbids:
1261 if f in fbids:
1242 d = fbids[f]
1262 d = fbids[f]
1243 if m in d:
1263 if m in d:
1244 d[m].append(a)
1264 d[m].append(a)
1245 else:
1265 else:
1246 d[m] = [a]
1266 d[m] = [a]
1247 else:
1267 else:
1248 fbids[f] = {m: [a]}
1268 fbids[f] = {m: [a]}
1249
1269
1250 # Pick the best bid for each file
1270 # Pick the best bid for each file
1251 repo.ui.note(_('\nauction for merging merge bids\n'))
1271 repo.ui.note(_('\nauction for merging merge bids\n'))
1252 actions = {}
1272 actions = {}
1253 dms = [] # filenames that have dm actions
1273 dms = [] # filenames that have dm actions
1254 for f, bids in sorted(fbids.items()):
1274 for f, bids in sorted(fbids.items()):
1255 # bids is a mapping from action method to list of actions
1275 # bids is a mapping from action method to list of actions
1256 # Consensus?
1276 # Consensus?
1257 if len(bids) == 1: # all bids are the same kind of method
1277 if len(bids) == 1: # all bids are the same kind of method
1258 m, l = list(bids.items())[0]
1278 m, l = list(bids.items())[0]
1259 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1279 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1260 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1280 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1261 actions[f] = l[0]
1281 actions[f] = l[0]
1262 if m == 'dm':
1282 if m == 'dm':
1263 dms.append(f)
1283 dms.append(f)
1264 continue
1284 continue
1265 # If keep is an option, just do it.
1285 # If keep is an option, just do it.
1266 if 'k' in bids:
1286 if 'k' in bids:
1267 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1287 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1268 actions[f] = bids['k'][0]
1288 actions[f] = bids['k'][0]
1269 continue
1289 continue
1270 # If there are gets and they all agree [how could they not?], do it.
1290 # If there are gets and they all agree [how could they not?], do it.
1271 if 'g' in bids:
1291 if 'g' in bids:
1272 ga0 = bids['g'][0]
1292 ga0 = bids['g'][0]
1273 if all(a == ga0 for a in bids['g'][1:]):
1293 if all(a == ga0 for a in bids['g'][1:]):
1274 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1294 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1275 actions[f] = ga0
1295 actions[f] = ga0
1276 continue
1296 continue
1277 # TODO: Consider other simple actions such as mode changes
1297 # TODO: Consider other simple actions such as mode changes
1278 # Handle inefficient democrazy.
1298 # Handle inefficient democrazy.
1279 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1299 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1280 for m, l in sorted(bids.items()):
1300 for m, l in sorted(bids.items()):
1281 for _f, args, msg in l:
1301 for _f, args, msg in l:
1282 repo.ui.note(' %s -> %s\n' % (msg, m))
1302 repo.ui.note(' %s -> %s\n' % (msg, m))
1283 # Pick random action. TODO: Instead, prompt user when resolving
1303 # Pick random action. TODO: Instead, prompt user when resolving
1284 m, l = list(bids.items())[0]
1304 m, l = list(bids.items())[0]
1285 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1305 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1286 (f, m))
1306 (f, m))
1287 actions[f] = l[0]
1307 actions[f] = l[0]
1288 if m == 'dm':
1308 if m == 'dm':
1289 dms.append(f)
1309 dms.append(f)
1290 continue
1310 continue
1291 # Work around 'dm' actions that can leave multiple actions for the same file
1311 # Work around 'dm' actions that can leave multiple actions for the same file
1292 for f in dms:
1312 for f in dms:
1293 dm, (f0, flags), msg = actions[f]
1313 dm, (f0, flags), msg = actions[f]
1294 assert dm == 'dm', dm
1314 assert dm == 'dm', dm
1295 if f0 in actions and actions[f0][0] == 'r':
1315 if f0 in actions and actions[f0][0] == 'r':
1296 # We have one bid for removing a file and another for moving it.
1316 # We have one bid for removing a file and another for moving it.
1297 # These two could be merged as first move and then delete ...
1317 # These two could be merged as first move and then delete ...
1298 # but instead drop moving and just delete.
1318 # but instead drop moving and just delete.
1299 del actions[f]
1319 del actions[f]
1300 repo.ui.note(_('end of auction\n\n'))
1320 repo.ui.note(_('end of auction\n\n'))
1301
1321
1302 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1322 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1303
1323
1304 if wctx.rev() is None:
1324 if wctx.rev() is None:
1305 fractions = _forgetremoved(wctx, mctx, branchmerge)
1325 fractions = _forgetremoved(wctx, mctx, branchmerge)
1306 actions.update(fractions)
1326 actions.update(fractions)
1307
1327
1308 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1328 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1309 actions)
1329 actions)
1310
1330
1311 return prunedactions, diverge, renamedelete
1331 return prunedactions, diverge, renamedelete
1312
1332
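The auction is easier to follow on plain data; this sketch reproduces the consensus, keep and agreed-get priorities and falls back to an arbitrary bid, leaving out the trailing 'dm' cleanup pass.

def auction(fbids):
    """Pick one action per file from {filename: {action_code: [bid, ...]}}."""
    chosen = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                       # every ancestor bid the same kind
            chosen[f] = next(iter(bids.values()))[0]
            continue
        if 'k' in bids:                          # keeping the file is always safe
            chosen[f] = bids['k'][0]
            continue
        gets = bids.get('g', [])
        if gets and all(b == gets[0] for b in gets):
            chosen[f] = gets[0]                  # all 'get' bids agree
            continue
        chosen[f] = sorted(bids.items())[0][1][0]  # ambiguous: arbitrary but stable pick
    return chosen

# auction({'a': {'g': [('g', ('x',), 'remote is newer')]},
#          'b': {'k': [('k', (), 'keep')], 'g': [('g', ('x',), 'newer')]}})
# -> 'a' gets its single 'g' bid, 'b' gets the 'k' bid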
1313 def _getcwd():
1333 def _getcwd():
1314 try:
1334 try:
1315 return pycompat.getcwd()
1335 return pycompat.getcwd()
1316 except OSError as err:
1336 except OSError as err:
1317 if err.errno == errno.ENOENT:
1337 if err.errno == errno.ENOENT:
1318 return None
1338 return None
1319 raise
1339 raise
1320
1340
1321 def batchremove(repo, wctx, actions):
1341 def batchremove(repo, wctx, actions):
1322 """apply removes to the working directory
1342 """apply removes to the working directory
1323
1343
1324 yields tuples for progress updates
1344 yields tuples for progress updates
1325 """
1345 """
1326 verbose = repo.ui.verbose
1346 verbose = repo.ui.verbose
1327 cwd = _getcwd()
1347 cwd = _getcwd()
1328 i = 0
1348 i = 0
1329 for f, args, msg in actions:
1349 for f, args, msg in actions:
1330 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1350 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1331 if verbose:
1351 if verbose:
1332 repo.ui.note(_("removing %s\n") % f)
1352 repo.ui.note(_("removing %s\n") % f)
1333 wctx[f].audit()
1353 wctx[f].audit()
1334 try:
1354 try:
1335 wctx[f].remove(ignoremissing=True)
1355 wctx[f].remove(ignoremissing=True)
1336 except OSError as inst:
1356 except OSError as inst:
1337 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1357 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1338 (f, inst.strerror))
1358 (f, inst.strerror))
1339 if i == 100:
1359 if i == 100:
1340 yield i, f
1360 yield i, f
1341 i = 0
1361 i = 0
1342 i += 1
1362 i += 1
1343 if i > 0:
1363 if i > 0:
1344 yield i, f
1364 yield i, f
1345
1365
1346 if cwd and not _getcwd():
1366 if cwd and not _getcwd():
1347 # cwd was removed in the course of removing files; print a helpful
1367 # cwd was removed in the course of removing files; print a helpful
1348 # warning.
1368 # warning.
1349 repo.ui.warn(_("current directory was removed\n"
1369 repo.ui.warn(_("current directory was removed\n"
1350 "(consider changing to repo root: %s)\n") % repo.root)
1370 "(consider changing to repo root: %s)\n") % repo.root)
1351
1371
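The progress-batching pattern used by batchremove (and batchget below) is independent of Mercurial: process items and periodically yield how many were handled so the caller can advance a progress bar. A generic sketch:

def process_in_batches(items, handle, batchsize=100):
    """Call handle(item) for each item, yielding (count, item) every batchsize items."""
    done = 0
    item = None
    for item in items:
        handle(item)
        done += 1
        if done == batchsize:
            yield done, item
            done = 0
    if done:
        yield done, item

# total = 0
# for n, last in process_in_batches(range(250), lambda x: None):
#     total += n            # yields 100, 100, 50 -> three progress updates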
1352 def batchget(repo, mctx, wctx, actions):
1372 def batchget(repo, mctx, wctx, actions):
1353 """apply gets to the working directory
1373 """apply gets to the working directory
1354
1374
1355 mctx is the context to get from
1375 mctx is the context to get from
1356
1376
1357 yields tuples for progress updates
1377 yields tuples for progress updates
1358 """
1378 """
1359 verbose = repo.ui.verbose
1379 verbose = repo.ui.verbose
1360 fctx = mctx.filectx
1380 fctx = mctx.filectx
1361 ui = repo.ui
1381 ui = repo.ui
1362 i = 0
1382 i = 0
1363 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1383 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1364 for f, (flags, backup), msg in actions:
1384 for f, (flags, backup), msg in actions:
1365 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1385 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1366 if verbose:
1386 if verbose:
1367 repo.ui.note(_("getting %s\n") % f)
1387 repo.ui.note(_("getting %s\n") % f)
1368
1388
1369 if backup:
1389 if backup:
1370 # If a file or directory exists with the same name, back that
1390 # If a file or directory exists with the same name, back that
1371 # up. Otherwise, look to see if there is a file that conflicts
1391 # up. Otherwise, look to see if there is a file that conflicts
1372 # with a directory this file is in, and if so, back that up.
1392 # with a directory this file is in, and if so, back that up.
1373 absf = repo.wjoin(f)
1393 absf = repo.wjoin(f)
1374 if not repo.wvfs.lexists(f):
1394 if not repo.wvfs.lexists(f):
1375 for p in util.finddirs(f):
1395 for p in util.finddirs(f):
1376 if repo.wvfs.isfileorlink(p):
1396 if repo.wvfs.isfileorlink(p):
1377 absf = repo.wjoin(p)
1397 absf = repo.wjoin(p)
1378 break
1398 break
1379 orig = scmutil.origpath(ui, repo, absf)
1399 orig = scmutil.origpath(ui, repo, absf)
1380 if repo.wvfs.lexists(absf):
1400 if repo.wvfs.lexists(absf):
1381 util.rename(absf, orig)
1401 util.rename(absf, orig)
1382 wctx[f].clearunknown()
1402 wctx[f].clearunknown()
1383 atomictemp = ui.configbool("experimental", "update.atomic-file")
1403 atomictemp = ui.configbool("experimental", "update.atomic-file")
1384 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1404 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1385 atomictemp=atomictemp)
1405 atomictemp=atomictemp)
1386 if i == 100:
1406 if i == 100:
1387 yield i, f
1407 yield i, f
1388 i = 0
1408 i = 0
1389 i += 1
1409 i += 1
1390 if i > 0:
1410 if i > 0:
1391 yield i, f
1411 yield i, f
1392
1412
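The backup step above amounts to: if something already sits at the target path, or a parent of the target is a file, set that path aside under a '.orig' name before writing. A filesystem-free sketch of just that decision, with injected predicates rather than repo.wvfs and scmutil.origpath calls:

import os

def backup_target(path, exists, isfile):
    """Return the path that should be renamed to '<path>.orig', or None.

    exists(p) and isfile(p) are injected predicates (e.g. os.path.lexists
    and os.path.isfile) so the decision can be tested without a real disk.
    """
    if exists(path):
        return path                 # a file/dir/link is in the way: back it up
    parent = os.path.dirname(path)
    while parent:
        if isfile(parent):
            return parent           # a file blocks the directory we need
        parent = os.path.dirname(parent)
    return None

# backup_target('a/b/c', exists={'a'}.__contains__, isfile={'a'}.__contains__) -> 'a'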
1393 def _prefetchfiles(repo, ctx, actions):
1413 def _prefetchfiles(repo, ctx, actions):
1394 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1414 """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
1395 of merge actions. ``ctx`` is the context being merged in."""
1415 of merge actions. ``ctx`` is the context being merged in."""
1396
1416
1397 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1417 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1398 # don't touch the context to be merged in. 'cd' is skipped, because
1418 # don't touch the context to be merged in. 'cd' is skipped, because
1399 # changed/deleted never resolves to something from the remote side.
1419 # changed/deleted never resolves to something from the remote side.
1400 oplist = [actions[a] for a in 'g dc dg m'.split()]
1420 oplist = [actions[a] for a in 'g dc dg m'.split()]
1401 prefetch = scmutil.fileprefetchhooks
1421 prefetch = scmutil.fileprefetchhooks
1402 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1422 prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
1403
1423
1404 @attr.s(frozen=True)
1424 @attr.s(frozen=True)
1405 class updateresult(object):
1425 class updateresult(object):
1406 updatedcount = attr.ib()
1426 updatedcount = attr.ib()
1407 mergedcount = attr.ib()
1427 mergedcount = attr.ib()
1408 removedcount = attr.ib()
1428 removedcount = attr.ib()
1409 unresolvedcount = attr.ib()
1429 unresolvedcount = attr.ib()
1410
1430
1411 # TODO remove container emulation once consumers switch to new API.
1431 # TODO remove container emulation once consumers switch to new API.
1412
1432
1413 def __getitem__(self, x):
1433 def __getitem__(self, x):
1414 if x == 0:
1434 if x == 0:
1415 return self.updatedcount
1435 return self.updatedcount
1416 elif x == 1:
1436 elif x == 1:
1417 return self.mergedcount
1437 return self.mergedcount
1418 elif x == 2:
1438 elif x == 2:
1419 return self.removedcount
1439 return self.removedcount
1420 elif x == 3:
1440 elif x == 3:
1421 return self.unresolvedcount
1441 return self.unresolvedcount
1422 else:
1442 else:
1423 raise IndexError('can only access items 0-3')
1443 raise IndexError('can only access items 0-3')
1424
1444
1425 def __len__(self):
1445 def __len__(self):
1426 return 4
1446 return 4
1427
1447
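updateresult keeps old callers working by emulating the legacy 4-tuple; the same trick needs nothing beyond the sequence protocol, as this attrs-free sketch shows.

class UpdateResultSketch(object):
    """Named counters that still unpack like the legacy 4-tuple."""

    def __init__(self, updated, merged, removed, unresolved):
        self.updatedcount = updated
        self.mergedcount = merged
        self.removedcount = removed
        self.unresolvedcount = unresolved

    def __getitem__(self, x):
        return (self.updatedcount, self.mergedcount,
                self.removedcount, self.unresolvedcount)[x]

    def __len__(self):
        return 4

# stats = UpdateResultSketch(3, 1, 0, 0)
# updated, merged, removed, unresolved = stats  # old tuple-style unpacking still works
# stats.unresolvedcount                         # new attribute-style access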
1428 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1448 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1429 """apply the merge action list to the working directory
1449 """apply the merge action list to the working directory
1430
1450
1431 wctx is the working copy context
1451 wctx is the working copy context
1432 mctx is the context to be merged into the working copy
1452 mctx is the context to be merged into the working copy
1433
1453
1434 Return a tuple of counts (updated, merged, removed, unresolved) that
1454 Return a tuple of counts (updated, merged, removed, unresolved) that
1435 describes how many files were affected by the update.
1455 describes how many files were affected by the update.
1436 """
1456 """
1437
1457
1438 _prefetchfiles(repo, mctx, actions)
1458 _prefetchfiles(repo, mctx, actions)
1439
1459
1440 updated, merged, removed = 0, 0, 0
1460 updated, merged, removed = 0, 0, 0
1441 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1461 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1442 moves = []
1462 moves = []
1443 for m, l in actions.items():
1463 for m, l in actions.items():
1444 l.sort()
1464 l.sort()
1445
1465
1446 # 'cd' and 'dc' actions are treated like other merge conflicts
1466 # 'cd' and 'dc' actions are treated like other merge conflicts
1447 mergeactions = sorted(actions['cd'])
1467 mergeactions = sorted(actions['cd'])
1448 mergeactions.extend(sorted(actions['dc']))
1468 mergeactions.extend(sorted(actions['dc']))
1449 mergeactions.extend(actions['m'])
1469 mergeactions.extend(actions['m'])
1450 for f, args, msg in mergeactions:
1470 for f, args, msg in mergeactions:
1451 f1, f2, fa, move, anc = args
1471 f1, f2, fa, move, anc = args
1452 if f == '.hgsubstate': # merged internally
1472 if f == '.hgsubstate': # merged internally
1453 continue
1473 continue
1454 if f1 is None:
1474 if f1 is None:
1455 fcl = filemerge.absentfilectx(wctx, fa)
1475 fcl = filemerge.absentfilectx(wctx, fa)
1456 else:
1476 else:
1457 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1477 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1458 fcl = wctx[f1]
1478 fcl = wctx[f1]
1459 if f2 is None:
1479 if f2 is None:
1460 fco = filemerge.absentfilectx(mctx, fa)
1480 fco = filemerge.absentfilectx(mctx, fa)
1461 else:
1481 else:
1462 fco = mctx[f2]
1482 fco = mctx[f2]
1463 actx = repo[anc]
1483 actx = repo[anc]
1464 if fa in actx:
1484 if fa in actx:
1465 fca = actx[fa]
1485 fca = actx[fa]
1466 else:
1486 else:
1467 # TODO: move to absentfilectx
1487 # TODO: move to absentfilectx
1468 fca = repo.filectx(f1, fileid=nullrev)
1488 fca = repo.filectx(f1, fileid=nullrev)
1469 ms.add(fcl, fco, fca, f)
1489 ms.add(fcl, fco, fca, f)
1470 if f1 != f and move:
1490 if f1 != f and move:
1471 moves.append(f1)
1491 moves.append(f1)
1472
1492
1473 _updating = _('updating')
1493 _updating = _('updating')
1474 _files = _('files')
1494 _files = _('files')
1475 progress = repo.ui.progress
1495 progress = repo.ui.progress
1476
1496
1477 # remove renamed files after safely stored
1497 # remove renamed files after safely stored
1478 for f in moves:
1498 for f in moves:
1479 if wctx[f].lexists():
1499 if wctx[f].lexists():
1480 repo.ui.debug("removing %s\n" % f)
1500 repo.ui.debug("removing %s\n" % f)
1481 wctx[f].audit()
1501 wctx[f].audit()
1482 wctx[f].remove()
1502 wctx[f].remove()
1483
1503
1484 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1504 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1485 z = 0
1505 z = 0
1486
1506
1487 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1507 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1488 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1508 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1489
1509
1490 # record path conflicts
1510 # record path conflicts
1491 for f, args, msg in actions['p']:
1511 for f, args, msg in actions['p']:
1492 f1, fo = args
1512 f1, fo = args
1493 s = repo.ui.status
1513 s = repo.ui.status
1494 s(_("%s: path conflict - a file or link has the same name as a "
1514 s(_("%s: path conflict - a file or link has the same name as a "
1495 "directory\n") % f)
1515 "directory\n") % f)
1496 if fo == 'l':
1516 if fo == 'l':
1497 s(_("the local file has been renamed to %s\n") % f1)
1517 s(_("the local file has been renamed to %s\n") % f1)
1498 else:
1518 else:
1499 s(_("the remote file has been renamed to %s\n") % f1)
1519 s(_("the remote file has been renamed to %s\n") % f1)
1500 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1520 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1501 ms.addpath(f, f1, fo)
1521 ms.addpath(f, f1, fo)
1502 z += 1
1522 z += 1
1503 progress(_updating, z, item=f, total=numupdates, unit=_files)
1523 progress(_updating, z, item=f, total=numupdates, unit=_files)
1504
1524
1505 # When merging in-memory, we can't support worker processes, so set the
1525 # When merging in-memory, we can't support worker processes, so set the
1506 # per-item cost at 0 in that case.
1526 # per-item cost at 0 in that case.
1507 cost = 0 if wctx.isinmemory() else 0.001
1527 cost = 0 if wctx.isinmemory() else 0.001
1508
1528
1509 # remove in parallel (must come before resolving path conflicts and getting)
1529 # remove in parallel (must come before resolving path conflicts and getting)
1510 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1530 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1511 actions['r'])
1531 actions['r'])
1512 for i, item in prog:
1532 for i, item in prog:
1513 z += i
1533 z += i
1514 progress(_updating, z, item=item, total=numupdates, unit=_files)
1534 progress(_updating, z, item=item, total=numupdates, unit=_files)
1515 removed = len(actions['r'])
1535 removed = len(actions['r'])
1516
1536
1517 # resolve path conflicts (must come before getting)
1537 # resolve path conflicts (must come before getting)
1518 for f, args, msg in actions['pr']:
1538 for f, args, msg in actions['pr']:
1519 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1539 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1520 f0, = args
1540 f0, = args
1521 if wctx[f0].lexists():
1541 if wctx[f0].lexists():
1522 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1542 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1523 wctx[f].audit()
1543 wctx[f].audit()
1524 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1544 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1525 wctx[f0].remove()
1545 wctx[f0].remove()
1526 z += 1
1546 z += 1
1527 progress(_updating, z, item=f, total=numupdates, unit=_files)
1547 progress(_updating, z, item=f, total=numupdates, unit=_files)
1528
1548
1529 # get in parallel
1549 # get in parallel
1530 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1550 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1531 actions['g'])
1551 actions['g'])
1532 for i, item in prog:
1552 for i, item in prog:
1533 z += i
1553 z += i
1534 progress(_updating, z, item=item, total=numupdates, unit=_files)
1554 progress(_updating, z, item=item, total=numupdates, unit=_files)
1535 updated = len(actions['g'])
1555 updated = len(actions['g'])
1536
1556
1537 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1557 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1538 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1558 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1539
1559
1540 # forget (manifest only, just log it) (must come first)
1560 # forget (manifest only, just log it) (must come first)
1541 for f, args, msg in actions['f']:
1561 for f, args, msg in actions['f']:
1542 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1562 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1543 z += 1
1563 z += 1
1544 progress(_updating, z, item=f, total=numupdates, unit=_files)
1564 progress(_updating, z, item=f, total=numupdates, unit=_files)
1545
1565
1546 # re-add (manifest only, just log it)
1566 # re-add (manifest only, just log it)
1547 for f, args, msg in actions['a']:
1567 for f, args, msg in actions['a']:
1548 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1568 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1549 z += 1
1569 z += 1
1550 progress(_updating, z, item=f, total=numupdates, unit=_files)
1570 progress(_updating, z, item=f, total=numupdates, unit=_files)
1551
1571
1552 # re-add/mark as modified (manifest only, just log it)
1572 # re-add/mark as modified (manifest only, just log it)
1553 for f, args, msg in actions['am']:
1573 for f, args, msg in actions['am']:
1554 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1574 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1555 z += 1
1575 z += 1
1556 progress(_updating, z, item=f, total=numupdates, unit=_files)
1576 progress(_updating, z, item=f, total=numupdates, unit=_files)
1557
1577
1558 # keep (noop, just log it)
1578 # keep (noop, just log it)
1559 for f, args, msg in actions['k']:
1579 for f, args, msg in actions['k']:
1560 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1580 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1561 # no progress
1581 # no progress
1562
1582
1563 # directory rename, move local
1583 # directory rename, move local
1564 for f, args, msg in actions['dm']:
1584 for f, args, msg in actions['dm']:
1565 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1585 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1566 z += 1
1586 z += 1
1567 progress(_updating, z, item=f, total=numupdates, unit=_files)
1587 progress(_updating, z, item=f, total=numupdates, unit=_files)
1568 f0, flags = args
1588 f0, flags = args
1569 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1589 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1570 wctx[f].audit()
1590 wctx[f].audit()
1571 wctx[f].write(wctx.filectx(f0).data(), flags)
1591 wctx[f].write(wctx.filectx(f0).data(), flags)
1572 wctx[f0].remove()
1592 wctx[f0].remove()
1573 updated += 1
1593 updated += 1
1574
1594
1575 # local directory rename, get
1595 # local directory rename, get
1576 for f, args, msg in actions['dg']:
1596 for f, args, msg in actions['dg']:
1577 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1597 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1578 z += 1
1598 z += 1
1579 progress(_updating, z, item=f, total=numupdates, unit=_files)
1599 progress(_updating, z, item=f, total=numupdates, unit=_files)
1580 f0, flags = args
1600 f0, flags = args
1581 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1601 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1582 wctx[f].write(mctx.filectx(f0).data(), flags)
1602 wctx[f].write(mctx.filectx(f0).data(), flags)
1583 updated += 1
1603 updated += 1
1584
1604
1585 # exec
1605 # exec
1586 for f, args, msg in actions['e']:
1606 for f, args, msg in actions['e']:
1587 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1607 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1588 z += 1
1608 z += 1
1589 progress(_updating, z, item=f, total=numupdates, unit=_files)
1609 progress(_updating, z, item=f, total=numupdates, unit=_files)
1590 flags, = args
1610 flags, = args
1591 wctx[f].audit()
1611 wctx[f].audit()
1592 wctx[f].setflags('l' in flags, 'x' in flags)
1612 wctx[f].setflags('l' in flags, 'x' in flags)
1593 updated += 1
1613 updated += 1
1594
1614
1595 # the ordering is important here -- ms.mergedriver will raise if the merge
1615 # the ordering is important here -- ms.mergedriver will raise if the merge
1596 # driver has changed, and we want to be able to bypass it when overwrite is
1616 # driver has changed, and we want to be able to bypass it when overwrite is
1597 # True
1617 # True
1598 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1618 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1599
1619
1600 if usemergedriver:
1620 if usemergedriver:
1601 if wctx.isinmemory():
1621 if wctx.isinmemory():
1602 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1622 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1603 "support mergedriver")
1623 "support mergedriver")
1604 ms.commit()
1624 ms.commit()
1605 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1625 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1606 # the driver might leave some files unresolved
1626 # the driver might leave some files unresolved
1607 unresolvedf = set(ms.unresolved())
1627 unresolvedf = set(ms.unresolved())
1608 if not proceed:
1628 if not proceed:
1609 # XXX setting unresolved to at least 1 is a hack to make sure we
1629 # XXX setting unresolved to at least 1 is a hack to make sure we
1610 # error out
1630 # error out
1611 return updateresult(updated, merged, removed,
1631 return updateresult(updated, merged, removed,
1612 max(len(unresolvedf), 1))
1632 max(len(unresolvedf), 1))
1613 newactions = []
1633 newactions = []
1614 for f, args, msg in mergeactions:
1634 for f, args, msg in mergeactions:
1615 if f in unresolvedf:
1635 if f in unresolvedf:
1616 newactions.append((f, args, msg))
1636 newactions.append((f, args, msg))
1617 mergeactions = newactions
1637 mergeactions = newactions
1618
1638
1619 try:
1639 try:
1620 # premerge
1640 # premerge
1621 tocomplete = []
1641 tocomplete = []
1622 for f, args, msg in mergeactions:
1642 for f, args, msg in mergeactions:
1623 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1643 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1624 z += 1
1644 z += 1
1625 progress(_updating, z, item=f, total=numupdates, unit=_files)
1645 progress(_updating, z, item=f, total=numupdates, unit=_files)
1626 if f == '.hgsubstate': # subrepo states need updating
1646 if f == '.hgsubstate': # subrepo states need updating
1627 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1647 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1628 overwrite, labels)
1648 overwrite, labels)
1629 continue
1649 continue
1630 wctx[f].audit()
1650 wctx[f].audit()
1631 complete, r = ms.preresolve(f, wctx)
1651 complete, r = ms.preresolve(f, wctx)
1632 if not complete:
1652 if not complete:
1633 numupdates += 1
1653 numupdates += 1
1634 tocomplete.append((f, args, msg))
1654 tocomplete.append((f, args, msg))
1635
1655
1636 # merge
1656 # merge
1637 for f, args, msg in tocomplete:
1657 for f, args, msg in tocomplete:
1638 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1658 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1639 z += 1
1659 z += 1
1640 progress(_updating, z, item=f, total=numupdates, unit=_files)
1660 progress(_updating, z, item=f, total=numupdates, unit=_files)
1641 ms.resolve(f, wctx)
1661 ms.resolve(f, wctx)
1642
1662
1643 finally:
1663 finally:
1644 ms.commit()
1664 ms.commit()
1645
1665
1646 unresolved = ms.unresolvedcount()
1666 unresolved = ms.unresolvedcount()
1647
1667
1648 if usemergedriver and not unresolved and ms.mdstate() != 's':
1668 if usemergedriver and not unresolved and ms.mdstate() != 's':
1649 if not driverconclude(repo, ms, wctx, labels=labels):
1669 if not driverconclude(repo, ms, wctx, labels=labels):
1650 # XXX setting unresolved to at least 1 is a hack to make sure we
1670 # XXX setting unresolved to at least 1 is a hack to make sure we
1651 # error out
1671 # error out
1652 unresolved = max(unresolved, 1)
1672 unresolved = max(unresolved, 1)
1653
1673
1654 ms.commit()
1674 ms.commit()
1655
1675
1656 msupdated, msmerged, msremoved = ms.counts()
1676 msupdated, msmerged, msremoved = ms.counts()
1657 updated += msupdated
1677 updated += msupdated
1658 merged += msmerged
1678 merged += msmerged
1659 removed += msremoved
1679 removed += msremoved
1660
1680
1661 extraactions = ms.actions()
1681 extraactions = ms.actions()
1662 if extraactions:
1682 if extraactions:
1663 mfiles = set(a[0] for a in actions['m'])
1683 mfiles = set(a[0] for a in actions['m'])
1664 for k, acts in extraactions.iteritems():
1684 for k, acts in extraactions.iteritems():
1665 actions[k].extend(acts)
1685 actions[k].extend(acts)
1666 # Remove these files from actions['m'] as well. This is important
1686 # Remove these files from actions['m'] as well. This is important
1667 # because in recordupdates, files in actions['m'] are processed
1687 # because in recordupdates, files in actions['m'] are processed
1668 # after files in other actions, and the merge driver might add
1688 # after files in other actions, and the merge driver might add
1669 # files to those actions via extraactions above. This can lead to a
1689 # files to those actions via extraactions above. This can lead to a
1670 # file being recorded twice, with poor results. This is especially
1690 # file being recorded twice, with poor results. This is especially
1671 # problematic for actions['r'] (currently only possible with the
1691 # problematic for actions['r'] (currently only possible with the
1672 # merge driver in the initial merge process; interrupted merges
1692 # merge driver in the initial merge process; interrupted merges
1673 # don't go through this flow).
1693 # don't go through this flow).
1674 #
1694 #
1675 # The real fix here is to have indexes by both file and action so
1695 # The real fix here is to have indexes by both file and action so
1676 # that when the action for a file is changed it is automatically
1696 # that when the action for a file is changed it is automatically
1677 # reflected in the other action lists. But that involves a more
1697 # reflected in the other action lists. But that involves a more
1678 # complex data structure, so this will do for now.
1698 # complex data structure, so this will do for now.
1679 #
1699 #
1680 # We don't need to do the same operation for 'dc' and 'cd' because
1700 # We don't need to do the same operation for 'dc' and 'cd' because
1681 # those lists aren't consulted again.
1701 # those lists aren't consulted again.
1682 mfiles.difference_update(a[0] for a in acts)
1702 mfiles.difference_update(a[0] for a in acts)
1683
1703
1684 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1704 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1685
1705
1686 progress(_updating, None, total=numupdates, unit=_files)
1706 progress(_updating, None, total=numupdates, unit=_files)
1687 return updateresult(updated, merged, removed, unresolved)
1707 return updateresult(updated, merged, removed, unresolved)
1688
1708
1689 def recordupdates(repo, actions, branchmerge):
1709 def recordupdates(repo, actions, branchmerge):
1690 "record merge actions to the dirstate"
1710 "record merge actions to the dirstate"
1691 # remove (must come first)
1711 # remove (must come first)
1692 for f, args, msg in actions.get('r', []):
1712 for f, args, msg in actions.get('r', []):
1693 if branchmerge:
1713 if branchmerge:
1694 repo.dirstate.remove(f)
1714 repo.dirstate.remove(f)
1695 else:
1715 else:
1696 repo.dirstate.drop(f)
1716 repo.dirstate.drop(f)
1697
1717
1698 # forget (must come first)
1718 # forget (must come first)
1699 for f, args, msg in actions.get('f', []):
1719 for f, args, msg in actions.get('f', []):
1700 repo.dirstate.drop(f)
1720 repo.dirstate.drop(f)
1701
1721
1702 # resolve path conflicts
1722 # resolve path conflicts
1703 for f, args, msg in actions.get('pr', []):
1723 for f, args, msg in actions.get('pr', []):
1704 f0, = args
1724 f0, = args
1705 origf0 = repo.dirstate.copied(f0) or f0
1725 origf0 = repo.dirstate.copied(f0) or f0
1706 repo.dirstate.add(f)
1726 repo.dirstate.add(f)
1707 repo.dirstate.copy(origf0, f)
1727 repo.dirstate.copy(origf0, f)
1708 if f0 == origf0:
1728 if f0 == origf0:
1709 repo.dirstate.remove(f0)
1729 repo.dirstate.remove(f0)
1710 else:
1730 else:
1711 repo.dirstate.drop(f0)
1731 repo.dirstate.drop(f0)
1712
1732
1713 # re-add
1733 # re-add
1714 for f, args, msg in actions.get('a', []):
1734 for f, args, msg in actions.get('a', []):
1715 repo.dirstate.add(f)
1735 repo.dirstate.add(f)
1716
1736
1717 # re-add/mark as modified
1737 # re-add/mark as modified
1718 for f, args, msg in actions.get('am', []):
1738 for f, args, msg in actions.get('am', []):
1719 if branchmerge:
1739 if branchmerge:
1720 repo.dirstate.normallookup(f)
1740 repo.dirstate.normallookup(f)
1721 else:
1741 else:
1722 repo.dirstate.add(f)
1742 repo.dirstate.add(f)
1723
1743
1724 # exec change
1744 # exec change
1725 for f, args, msg in actions.get('e', []):
1745 for f, args, msg in actions.get('e', []):
1726 repo.dirstate.normallookup(f)
1746 repo.dirstate.normallookup(f)
1727
1747
1728 # keep
1748 # keep
1729 for f, args, msg in actions.get('k', []):
1749 for f, args, msg in actions.get('k', []):
1730 pass
1750 pass
1731
1751
1732 # get
1752 # get
1733 for f, args, msg in actions.get('g', []):
1753 for f, args, msg in actions.get('g', []):
1734 if branchmerge:
1754 if branchmerge:
1735 repo.dirstate.otherparent(f)
1755 repo.dirstate.otherparent(f)
1736 else:
1756 else:
1737 repo.dirstate.normal(f)
1757 repo.dirstate.normal(f)
1738
1758
1739 # merge
1759 # merge
1740 for f, args, msg in actions.get('m', []):
1760 for f, args, msg in actions.get('m', []):
1741 f1, f2, fa, move, anc = args
1761 f1, f2, fa, move, anc = args
1742 if branchmerge:
1762 if branchmerge:
1743 # We've done a branch merge, mark this file as merged
1763 # We've done a branch merge, mark this file as merged
1744 # so that we properly record the merger later
1764 # so that we properly record the merger later
1745 repo.dirstate.merge(f)
1765 repo.dirstate.merge(f)
1746 if f1 != f2: # copy/rename
1766 if f1 != f2: # copy/rename
1747 if move:
1767 if move:
1748 repo.dirstate.remove(f1)
1768 repo.dirstate.remove(f1)
1749 if f1 != f:
1769 if f1 != f:
1750 repo.dirstate.copy(f1, f)
1770 repo.dirstate.copy(f1, f)
1751 else:
1771 else:
1752 repo.dirstate.copy(f2, f)
1772 repo.dirstate.copy(f2, f)
1753 else:
1773 else:
1754 # We've update-merged a locally modified file, so
1774 # We've update-merged a locally modified file, so
1755 # we set the dirstate to emulate a normal checkout
1775 # we set the dirstate to emulate a normal checkout
1756 # of that file some time in the past. Thus our
1776 # of that file some time in the past. Thus our
1757 # merge will appear as a normal local file
1777 # merge will appear as a normal local file
1758 # modification.
1778 # modification.
1759 if f2 == f: # file not locally copied/moved
1779 if f2 == f: # file not locally copied/moved
1760 repo.dirstate.normallookup(f)
1780 repo.dirstate.normallookup(f)
1761 if move:
1781 if move:
1762 repo.dirstate.drop(f1)
1782 repo.dirstate.drop(f1)
1763
1783
1764 # directory rename, move local
1784 # directory rename, move local
1765 for f, args, msg in actions.get('dm', []):
1785 for f, args, msg in actions.get('dm', []):
1766 f0, flag = args
1786 f0, flag = args
1767 if branchmerge:
1787 if branchmerge:
1768 repo.dirstate.add(f)
1788 repo.dirstate.add(f)
1769 repo.dirstate.remove(f0)
1789 repo.dirstate.remove(f0)
1770 repo.dirstate.copy(f0, f)
1790 repo.dirstate.copy(f0, f)
1771 else:
1791 else:
1772 repo.dirstate.normal(f)
1792 repo.dirstate.normal(f)
1773 repo.dirstate.drop(f0)
1793 repo.dirstate.drop(f0)
1774
1794
1775 # directory rename, get
1795 # directory rename, get
1776 for f, args, msg in actions.get('dg', []):
1796 for f, args, msg in actions.get('dg', []):
1777 f0, flag = args
1797 f0, flag = args
1778 if branchmerge:
1798 if branchmerge:
1779 repo.dirstate.add(f)
1799 repo.dirstate.add(f)
1780 repo.dirstate.copy(f0, f)
1800 repo.dirstate.copy(f0, f)
1781 else:
1801 else:
1782 repo.dirstate.normal(f)
1802 repo.dirstate.normal(f)
1783
1803
1784 def update(repo, node, branchmerge, force, ancestor=None,
1804 def update(repo, node, branchmerge, force, ancestor=None,
1785 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1805 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1786 updatecheck=None, wc=None):
1806 updatecheck=None, wc=None):
1787 """
1807 """
1788 Perform a merge between the working directory and the given node
1808 Perform a merge between the working directory and the given node
1789
1809
1790 node = the node to update to
1810 node = the node to update to
1791 branchmerge = whether to merge between branches
1811 branchmerge = whether to merge between branches
1792 force = whether to force branch merging or file overwriting
1812 force = whether to force branch merging or file overwriting
1793 matcher = a matcher to filter file lists (dirstate not updated)
1813 matcher = a matcher to filter file lists (dirstate not updated)
1794 mergeancestor = whether it is merging with an ancestor. If true,
1814 mergeancestor = whether it is merging with an ancestor. If true,
1795 we should accept the incoming changes for any prompts that occur.
1815 we should accept the incoming changes for any prompts that occur.
1796 If false, merging with an ancestor (fast-forward) is only allowed
1816 If false, merging with an ancestor (fast-forward) is only allowed
1797 between different named branches. This flag is used by the rebase extension
1817 between different named branches. This flag is used by the rebase extension
1798 as a temporary fix and should be avoided in general.
1818 as a temporary fix and should be avoided in general.
1799 labels = labels to use for base, local and other
1819 labels = labels to use for base, local and other
1800 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1820 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1801 this is True, then 'force' should be True as well.
1821 this is True, then 'force' should be True as well.
1802
1822
1803 The table below shows all the behaviors of the update command given the
1823 The table below shows all the behaviors of the update command given the
1804 -c/--check and -C/--clean or no options, whether the working directory is
1824 -c/--check and -C/--clean or no options, whether the working directory is
1805 dirty, whether a revision is specified, and the relationship of the parent
1825 dirty, whether a revision is specified, and the relationship of the parent
1806 rev to the target rev (linear or not). Match from top first. The -n
1826 rev to the target rev (linear or not). Match from top first. The -n
1807 option doesn't exist on the command line, but represents the
1827 option doesn't exist on the command line, but represents the
1808 experimental.updatecheck=noconflict option.
1828 experimental.updatecheck=noconflict option.
1809
1829
1810 This logic is tested by test-update-branches.t.
1830 This logic is tested by test-update-branches.t.
1811
1831
1812 -c -C -n -m dirty rev linear | result
1832 -c -C -n -m dirty rev linear | result
1813 y y * * * * * | (1)
1833 y y * * * * * | (1)
1814 y * y * * * * | (1)
1834 y * y * * * * | (1)
1815 y * * y * * * | (1)
1835 y * * y * * * | (1)
1816 * y y * * * * | (1)
1836 * y y * * * * | (1)
1817 * y * y * * * | (1)
1837 * y * y * * * | (1)
1818 * * y y * * * | (1)
1838 * * y y * * * | (1)
1819 * * * * * n n | x
1839 * * * * * n n | x
1820 * * * * n * * | ok
1840 * * * * n * * | ok
1821 n n n n y * y | merge
1841 n n n n y * y | merge
1822 n n n n y y n | (2)
1842 n n n n y y n | (2)
1823 n n n y y * * | merge
1843 n n n y y * * | merge
1824 n n y n y * * | merge if no conflict
1844 n n y n y * * | merge if no conflict
1825 n y n n y * * | discard
1845 n y n n y * * | discard
1826 y n n n y * * | (3)
1846 y n n n y * * | (3)
1827
1847
1828 x = can't happen
1848 x = can't happen
1829 * = don't-care
1849 * = don't-care
1830 1 = incompatible options (checked in commands.py)
1850 1 = incompatible options (checked in commands.py)
1831 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1851 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1832 3 = abort: uncommitted changes (checked in commands.py)
1852 3 = abort: uncommitted changes (checked in commands.py)
1833
1853
1834 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1854 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1835 to repo[None] if None is passed.
1855 to repo[None] if None is passed.
1836
1856
1837 Return the same tuple as applyupdates().
1857 Return the same tuple as applyupdates().
1838 """
1858 """
1839 # Avoid cycle.
1859 # Avoid cycle.
1840 from . import sparse
1860 from . import sparse
1841
1861
1842 # This function used to find the default destination if node was None, but
1862 # This function used to find the default destination if node was None, but
1843 # that's now in destutil.py.
1863 # that's now in destutil.py.
1844 assert node is not None
1864 assert node is not None
1845 if not branchmerge and not force:
1865 if not branchmerge and not force:
1846 # TODO: remove the default once all callers that pass branchmerge=False
1866 # TODO: remove the default once all callers that pass branchmerge=False
1847 # and force=False pass a value for updatecheck. We may want to allow
1867 # and force=False pass a value for updatecheck. We may want to allow
1848 # updatecheck='abort' to better support some of these callers.
1868 # updatecheck='abort' to better support some of these callers.
1849 if updatecheck is None:
1869 if updatecheck is None:
1850 updatecheck = 'linear'
1870 updatecheck = 'linear'
1851 assert updatecheck in ('none', 'linear', 'noconflict')
1871 assert updatecheck in ('none', 'linear', 'noconflict')
1852 # If we're doing a partial update, we need to skip updating
1872 # If we're doing a partial update, we need to skip updating
1853 # the dirstate, so make a note of any partial-ness to the
1873 # the dirstate, so make a note of any partial-ness to the
1854 # update here.
1874 # update here.
1855 if matcher is None or matcher.always():
1875 if matcher is None or matcher.always():
1856 partial = False
1876 partial = False
1857 else:
1877 else:
1858 partial = True
1878 partial = True
1859 with repo.wlock():
1879 with repo.wlock():
1860 if wc is None:
1880 if wc is None:
1861 wc = repo[None]
1881 wc = repo[None]
1862 pl = wc.parents()
1882 pl = wc.parents()
1863 p1 = pl[0]
1883 p1 = pl[0]
1864 pas = [None]
1884 pas = [None]
1865 if ancestor is not None:
1885 if ancestor is not None:
1866 pas = [repo[ancestor]]
1886 pas = [repo[ancestor]]
1867
1887
1868 overwrite = force and not branchmerge
1888 overwrite = force and not branchmerge
1869
1889
1870 p2 = repo[node]
1890 p2 = repo[node]
1871 if pas[0] is None:
1891 if pas[0] is None:
1872 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1892 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1873 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1893 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1874 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1894 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1875 else:
1895 else:
1876 pas = [p1.ancestor(p2, warn=branchmerge)]
1896 pas = [p1.ancestor(p2, warn=branchmerge)]
1877
1897
1878 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1898 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1879
1899
1880 ### check phase
1900 ### check phase
1881 if not overwrite:
1901 if not overwrite:
1882 if len(pl) > 1:
1902 if len(pl) > 1:
1883 raise error.Abort(_("outstanding uncommitted merge"))
1903 raise error.Abort(_("outstanding uncommitted merge"))
1884 ms = mergestate.read(repo)
1904 ms = mergestate.read(repo)
1885 if list(ms.unresolved()):
1905 if list(ms.unresolved()):
1886 raise error.Abort(_("outstanding merge conflicts"))
1906 raise error.Abort(_("outstanding merge conflicts"))
1887 if branchmerge:
1907 if branchmerge:
1888 if pas == [p2]:
1908 if pas == [p2]:
1889 raise error.Abort(_("merging with a working directory ancestor"
1909 raise error.Abort(_("merging with a working directory ancestor"
1890 " has no effect"))
1910 " has no effect"))
1891 elif pas == [p1]:
1911 elif pas == [p1]:
1892 if not mergeancestor and wc.branch() == p2.branch():
1912 if not mergeancestor and wc.branch() == p2.branch():
1893 raise error.Abort(_("nothing to merge"),
1913 raise error.Abort(_("nothing to merge"),
1894 hint=_("use 'hg update' "
1914 hint=_("use 'hg update' "
1895 "or check 'hg heads'"))
1915 "or check 'hg heads'"))
1896 if not force and (wc.files() or wc.deleted()):
1916 if not force and (wc.files() or wc.deleted()):
1897 raise error.Abort(_("uncommitted changes"),
1917 raise error.Abort(_("uncommitted changes"),
1898 hint=_("use 'hg status' to list changes"))
1918 hint=_("use 'hg status' to list changes"))
1899 if not wc.isinmemory():
1919 if not wc.isinmemory():
1900 for s in sorted(wc.substate):
1920 for s in sorted(wc.substate):
1901 wc.sub(s).bailifchanged()
1921 wc.sub(s).bailifchanged()
1902
1922
1903 elif not overwrite:
1923 elif not overwrite:
1904 if p1 == p2: # no-op update
1924 if p1 == p2: # no-op update
1905 # call the hooks and exit early
1925 # call the hooks and exit early
1906 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1926 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1907 repo.hook('update', parent1=xp2, parent2='', error=0)
1927 repo.hook('update', parent1=xp2, parent2='', error=0)
1908 return updateresult(0, 0, 0, 0)
1928 return updateresult(0, 0, 0, 0)
1909
1929
1910 if (updatecheck == 'linear' and
1930 if (updatecheck == 'linear' and
1911 pas not in ([p1], [p2])): # nonlinear
1931 pas not in ([p1], [p2])): # nonlinear
1912 dirty = wc.dirty(missing=True)
1932 dirty = wc.dirty(missing=True)
1913 if dirty:
1933 if dirty:
1914 # Branching is a bit strange to ensure we make the minimal
1934 # Branching is a bit strange to ensure we make the minimal
1915 # number of calls to obsutil.foreground.
1935 # number of calls to obsutil.foreground.
1916 foreground = obsutil.foreground(repo, [p1.node()])
1936 foreground = obsutil.foreground(repo, [p1.node()])
1917 # note: the <node> variable contains a random identifier
1937 # note: the <node> variable contains a random identifier
1918 if repo[node].node() in foreground:
1938 if repo[node].node() in foreground:
1919 pass # allow updating to successors
1939 pass # allow updating to successors
1920 else:
1940 else:
1921 msg = _("uncommitted changes")
1941 msg = _("uncommitted changes")
1922 hint = _("commit or update --clean to discard changes")
1942 hint = _("commit or update --clean to discard changes")
1923 raise error.UpdateAbort(msg, hint=hint)
1943 raise error.UpdateAbort(msg, hint=hint)
1924 else:
1944 else:
1925 # Allow jumping branches if clean and specific rev given
1945 # Allow jumping branches if clean and specific rev given
1926 pass
1946 pass
1927
1947
1928 if overwrite:
1948 if overwrite:
1929 pas = [wc]
1949 pas = [wc]
1930 elif not branchmerge:
1950 elif not branchmerge:
1931 pas = [p1]
1951 pas = [p1]
1932
1952
1933 # deprecated config: merge.followcopies
1953 # deprecated config: merge.followcopies
1934 followcopies = repo.ui.configbool('merge', 'followcopies')
1954 followcopies = repo.ui.configbool('merge', 'followcopies')
1935 if overwrite:
1955 if overwrite:
1936 followcopies = False
1956 followcopies = False
1937 elif not pas[0]:
1957 elif not pas[0]:
1938 followcopies = False
1958 followcopies = False
1939 if not branchmerge and not wc.dirty(missing=True):
1959 if not branchmerge and not wc.dirty(missing=True):
1940 followcopies = False
1960 followcopies = False
1941
1961
1942 ### calculate phase
1962 ### calculate phase
1943 actionbyfile, diverge, renamedelete = calculateupdates(
1963 actionbyfile, diverge, renamedelete = calculateupdates(
1944 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1964 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1945 followcopies, matcher=matcher, mergeforce=mergeforce)
1965 followcopies, matcher=matcher, mergeforce=mergeforce)
1946
1966
1947 if updatecheck == 'noconflict':
1967 if updatecheck == 'noconflict':
1948 for f, (m, args, msg) in actionbyfile.iteritems():
1968 for f, (m, args, msg) in actionbyfile.iteritems():
1949 if m not in ('g', 'k', 'e', 'r', 'pr'):
1969 if m not in ('g', 'k', 'e', 'r', 'pr'):
1950 msg = _("conflicting changes")
1970 msg = _("conflicting changes")
1951 hint = _("commit or update --clean to discard changes")
1971 hint = _("commit or update --clean to discard changes")
1952 raise error.Abort(msg, hint=hint)
1972 raise error.Abort(msg, hint=hint)
1953
1973
1954 # Prompt and create actions. Most of this is in the resolve phase
1974 # Prompt and create actions. Most of this is in the resolve phase
1955 # already, but we can't handle .hgsubstate in filemerge or
1975 # already, but we can't handle .hgsubstate in filemerge or
1956 # subrepoutil.submerge yet so we have to keep prompting for it.
1976 # subrepoutil.submerge yet so we have to keep prompting for it.
1957 if '.hgsubstate' in actionbyfile:
1977 if '.hgsubstate' in actionbyfile:
1958 f = '.hgsubstate'
1978 f = '.hgsubstate'
1959 m, args, msg = actionbyfile[f]
1979 m, args, msg = actionbyfile[f]
1960 prompts = filemerge.partextras(labels)
1980 prompts = filemerge.partextras(labels)
1961 prompts['f'] = f
1981 prompts['f'] = f
1962 if m == 'cd':
1982 if m == 'cd':
1963 if repo.ui.promptchoice(
1983 if repo.ui.promptchoice(
1964 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1984 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1965 "use (c)hanged version or (d)elete?"
1985 "use (c)hanged version or (d)elete?"
1966 "$$ &Changed $$ &Delete") % prompts, 0):
1986 "$$ &Changed $$ &Delete") % prompts, 0):
1967 actionbyfile[f] = ('r', None, "prompt delete")
1987 actionbyfile[f] = ('r', None, "prompt delete")
1968 elif f in p1:
1988 elif f in p1:
1969 actionbyfile[f] = ('am', None, "prompt keep")
1989 actionbyfile[f] = ('am', None, "prompt keep")
1970 else:
1990 else:
1971 actionbyfile[f] = ('a', None, "prompt keep")
1991 actionbyfile[f] = ('a', None, "prompt keep")
1972 elif m == 'dc':
1992 elif m == 'dc':
1973 f1, f2, fa, move, anc = args
1993 f1, f2, fa, move, anc = args
1974 flags = p2[f2].flags()
1994 flags = p2[f2].flags()
1975 if repo.ui.promptchoice(
1995 if repo.ui.promptchoice(
1976 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1996 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1977 "use (c)hanged version or leave (d)eleted?"
1997 "use (c)hanged version or leave (d)eleted?"
1978 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1998 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1979 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1999 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1980 else:
2000 else:
1981 del actionbyfile[f]
2001 del actionbyfile[f]
1982
2002
1983 # Convert to dictionary-of-lists format
2003 # Convert to dictionary-of-lists format
1984 actions = dict((m, [])
2004 actions = dict((m, [])
1985 for m in 'a am f g cd dc r dm dg m e k p pr'.split())
2005 for m in 'a am f g cd dc r dm dg m e k p pr'.split())
1986 for f, (m, args, msg) in actionbyfile.iteritems():
2006 for f, (m, args, msg) in actionbyfile.iteritems():
1987 if m not in actions:
2007 if m not in actions:
1988 actions[m] = []
2008 actions[m] = []
1989 actions[m].append((f, args, msg))
2009 actions[m].append((f, args, msg))
1990
2010
1991 if not util.fscasesensitive(repo.path):
2011 if not util.fscasesensitive(repo.path):
1992 # check collision between files only in p2 for clean update
2012 # check collision between files only in p2 for clean update
1993 if (not branchmerge and
2013 if (not branchmerge and
1994 (force or not wc.dirty(missing=True, branch=False))):
2014 (force or not wc.dirty(missing=True, branch=False))):
1995 _checkcollision(repo, p2.manifest(), None)
2015 _checkcollision(repo, p2.manifest(), None)
1996 else:
2016 else:
1997 _checkcollision(repo, wc.manifest(), actions)
2017 _checkcollision(repo, wc.manifest(), actions)
1998
2018
1999 # divergent renames
2019 # divergent renames
2000 for f, fl in sorted(diverge.iteritems()):
2020 for f, fl in sorted(diverge.iteritems()):
2001 repo.ui.warn(_("note: possible conflict - %s was renamed "
2021 repo.ui.warn(_("note: possible conflict - %s was renamed "
2002 "multiple times to:\n") % f)
2022 "multiple times to:\n") % f)
2003 for nf in fl:
2023 for nf in fl:
2004 repo.ui.warn(" %s\n" % nf)
2024 repo.ui.warn(" %s\n" % nf)
2005
2025
2006 # rename and delete
2026 # rename and delete
2007 for f, fl in sorted(renamedelete.iteritems()):
2027 for f, fl in sorted(renamedelete.iteritems()):
2008 repo.ui.warn(_("note: possible conflict - %s was deleted "
2028 repo.ui.warn(_("note: possible conflict - %s was deleted "
2009 "and renamed to:\n") % f)
2029 "and renamed to:\n") % f)
2010 for nf in fl:
2030 for nf in fl:
2011 repo.ui.warn(" %s\n" % nf)
2031 repo.ui.warn(" %s\n" % nf)
2012
2032
2013 ### apply phase
2033 ### apply phase
2014 if not branchmerge: # just jump to the new rev
2034 if not branchmerge: # just jump to the new rev
2015 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2035 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2016 if not partial and not wc.isinmemory():
2036 if not partial and not wc.isinmemory():
2017 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2037 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2018 # note that we're in the middle of an update
2038 # note that we're in the middle of an update
2019 repo.vfs.write('updatestate', p2.hex())
2039 repo.vfs.write('updatestate', p2.hex())
2020
2040
2021 # Advertise fsmonitor when its presence could be useful.
2041 # Advertise fsmonitor when its presence could be useful.
2022 #
2042 #
2023 # We only advertise when performing an update from an empty working
2043 # We only advertise when performing an update from an empty working
2024 # directory. This typically only occurs during initial clone.
2044 # directory. This typically only occurs during initial clone.
2025 #
2045 #
2026 # We give users a mechanism to disable the warning in case it is
2046 # We give users a mechanism to disable the warning in case it is
2027 # annoying.
2047 # annoying.
2028 #
2048 #
2029 # We only allow the warning on Linux and MacOS because that's where
2049 # We only allow the warning on Linux and MacOS because that's where
2030 # fsmonitor is considered stable.
2050 # fsmonitor is considered stable.
2031 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2051 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2032 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2052 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2033 'warn_update_file_count')
2053 'warn_update_file_count')
2034 try:
2054 try:
2035 # avoid cycle: extensions -> cmdutil -> merge
2055 # avoid cycle: extensions -> cmdutil -> merge
2036 from . import extensions
2056 from . import extensions
2037 extensions.find('fsmonitor')
2057 extensions.find('fsmonitor')
2038 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2058 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2039 # We intentionally don't look at whether fsmonitor has disabled
2059 # We intentionally don't look at whether fsmonitor has disabled
2040 # itself because a) fsmonitor may have already printed a warning
2060 # itself because a) fsmonitor may have already printed a warning
2041 # b) we only care about the config state here.
2061 # b) we only care about the config state here.
2042 except KeyError:
2062 except KeyError:
2043 fsmonitorenabled = False
2063 fsmonitorenabled = False
2044
2064
2045 if (fsmonitorwarning
2065 if (fsmonitorwarning
2046 and not fsmonitorenabled
2066 and not fsmonitorenabled
2047 and p1.node() == nullid
2067 and p1.node() == nullid
2048 and len(actions['g']) >= fsmonitorthreshold
2068 and len(actions['g']) >= fsmonitorthreshold
2049 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2069 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2050 repo.ui.warn(
2070 repo.ui.warn(
2051 _('(warning: large working directory being used without '
2071 _('(warning: large working directory being used without '
2052 'fsmonitor enabled; enable fsmonitor to improve performance; '
2072 'fsmonitor enabled; enable fsmonitor to improve performance; '
2053 'see "hg help -e fsmonitor")\n'))
2073 'see "hg help -e fsmonitor")\n'))
2054
2074
2055 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2075 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2056
2076
2057 if not partial and not wc.isinmemory():
2077 if not partial and not wc.isinmemory():
2058 with repo.dirstate.parentchange():
2078 with repo.dirstate.parentchange():
2059 repo.setparents(fp1, fp2)
2079 repo.setparents(fp1, fp2)
2060 recordupdates(repo, actions, branchmerge)
2080 recordupdates(repo, actions, branchmerge)
2061 # update completed, clear state
2081 # update completed, clear state
2062 util.unlink(repo.vfs.join('updatestate'))
2082 util.unlink(repo.vfs.join('updatestate'))
2063
2083
2064 if not branchmerge:
2084 if not branchmerge:
2065 repo.dirstate.setbranch(p2.branch())
2085 repo.dirstate.setbranch(p2.branch())
2066
2086
2067 # If we're updating to a location, clean up any stale temporary includes
2087 # If we're updating to a location, clean up any stale temporary includes
2068 # (ex: this happens during hg rebase --abort).
2088 # (ex: this happens during hg rebase --abort).
2069 if not branchmerge:
2089 if not branchmerge:
2070 sparse.prunetemporaryincludes(repo)
2090 sparse.prunetemporaryincludes(repo)
2071
2091
2072 if not partial:
2092 if not partial:
2073 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2093 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2074 return stats
2094 return stats
2075
2095
2076 def graft(repo, ctx, pctx, labels, keepparent=False):
2096 def graft(repo, ctx, pctx, labels, keepparent=False):
2077 """Do a graft-like merge.
2097 """Do a graft-like merge.
2078
2098
2079 This is a merge where the merge ancestor is chosen such that one
2099 This is a merge where the merge ancestor is chosen such that one
2080 or more changesets are grafted onto the current changeset. In
2100 or more changesets are grafted onto the current changeset. In
2081 addition to the merge, this fixes up the dirstate to include only
2101 addition to the merge, this fixes up the dirstate to include only
2082 a single parent (if keepparent is False) and tries to duplicate any
2102 a single parent (if keepparent is False) and tries to duplicate any
2083 renames/copies appropriately.
2103 renames/copies appropriately.
2084
2104
2085 ctx - changeset to rebase
2105 ctx - changeset to rebase
2086 pctx - merge base, usually ctx.p1()
2106 pctx - merge base, usually ctx.p1()
2087 labels - merge labels, e.g. ['local', 'graft']
2107 labels - merge labels, e.g. ['local', 'graft']
2088 keepparent - keep second parent if any
2108 keepparent - keep second parent if any
2089
2109
2090 """
2110 """
2091 # If we're grafting a descendant onto an ancestor, be sure to pass
2111 # If we're grafting a descendant onto an ancestor, be sure to pass
2092 # mergeancestor=True to update. This does two things: 1) allows the merge if
2112 # mergeancestor=True to update. This does two things: 1) allows the merge if
2093 # the destination is the same as the parent of the ctx (so we can use graft
2113 # the destination is the same as the parent of the ctx (so we can use graft
2094 # to copy commits), and 2) informs update that the incoming changes are
2114 # to copy commits), and 2) informs update that the incoming changes are
2095 # newer than the destination so it doesn't prompt about "remote changed foo
2115 # newer than the destination so it doesn't prompt about "remote changed foo
2096 # which local deleted".
2116 # which local deleted".
2097 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2117 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2098
2118
2099 stats = update(repo, ctx.node(), True, True, pctx.node(),
2119 stats = update(repo, ctx.node(), True, True, pctx.node(),
2100 mergeancestor=mergeancestor, labels=labels)
2120 mergeancestor=mergeancestor, labels=labels)
2101
2121
2102 pother = nullid
2122 pother = nullid
2103 parents = ctx.parents()
2123 parents = ctx.parents()
2104 if keepparent and len(parents) == 2 and pctx in parents:
2124 if keepparent and len(parents) == 2 and pctx in parents:
2105 parents.remove(pctx)
2125 parents.remove(pctx)
2106 pother = parents[0].node()
2126 pother = parents[0].node()
2107
2127
2108 with repo.dirstate.parentchange():
2128 with repo.dirstate.parentchange():
2109 repo.setparents(repo['.'].node(), pother)
2129 repo.setparents(repo['.'].node(), pother)
2110 repo.dirstate.write(repo.currenttransaction())
2130 repo.dirstate.write(repo.currenttransaction())
2111 # fix up dirstate for copies and renames
2131 # fix up dirstate for copies and renames
2112 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2132 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2113 return stats
2133 return stats