merge: raise before running mergedriver if using IMM...
Phil Cohen
r35516:87918218 @16 default
@@ -1,2066 +1,2069 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    extensions,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility with v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)
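
# Illustration (annotation added for this listing, not upstream commentary):
# a v2 'F' record ends with "...<other path>\0<other filenode>\0<flags>",
# while v1 has no "other filenode" field, so _droponode removes the
# second-to-last '\0'-separated field, e.g.
#   '...\0foo\0<othernode>\0x'  ->  '...\0foo\0x'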

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
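
    # Example (illustrative annotation, not part of the original file): a merge
    # of two heads with one conflicting file typically serializes records like
    #   ('L', '<40-char hex of local node>')
    #   ('O', '<40-char hex of other node>')
    #   ('F', 'foo\0u\0<hash>\0foo\0foo\0<ancnode>\0foo\0<othernode>\0')
    #   ('l', '<local label>\0<other label>')
    # Uppercase record types are mandatory for readers; lowercase are advisory.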
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes "record" entries produced by the
        de-serialization of the on-disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of records (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by versions prior to 2.9.1 and contains less data
        than v2. We read both versions and check whether any data in v2
        contradicts v1. If there is no contradiction we can safely assume
        that both v1 and v2 were written at the same time and use the extra
        data in v2. If there is a contradiction we ignore the v2 content, as
        we assume an old version of Mercurial has overwritten the mergestate
        file and left an old v2 file around.

        returns list of records [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add placeholder "other" file node information
            # nobody is using it yet so we do not need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of records [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records
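
    # Minimal sketch (added annotation; not part of merge.py): packing and
    # unpacking one v2 record by hand, mirroring the [type][length][content]
    # layout documented above:
    #
    #   >>> import struct
    #   >>> rec = 'L' + struct.pack('>I', 4) + 'abcd'
    #   >>> rec[0], struct.unpack('>I', rec[1:5])[0], rec[5:5 + 4]
    #   ('L', 4, 'abcd')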

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver
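
    # Illustration (annotation added for this listing): if the merge was
    # started with experimental.mergedriver set to one value (recorded in the
    # 'm' record and restored into self._readmergedriver by _read()), and the
    # hgrc now names a different driver (or none), the two no longer match and
    # ConfigError is raised here instead of silently running the new driver.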

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
            self._repo.vfs.exists(self.statepathv1) or \
            self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == 'd':
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(('D', '\0'.join([filename] + v)))
            elif v[0] in ('pu', 'pr'):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(('P', '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(('C', '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append(('F', '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()
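
    # Sketch (illustrative annotation, not upstream commentary): a mandatory
    # record outside the 'LOF' whitelist, e.g. ('D', data), is written as
    #   key='t', data='D' + data
    # so that pre-3.7 clients see an advisory 't' record and skip it instead
    # of aborting, while newer readers unwrap it back into a 'D' record.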

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True
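
    # For reference (derived from the code above; not an upstream comment): a
    # freshly added conflict yields a 'u' (unresolved) entry such as
    #   self._state['foo'] = ['u', <sha1 of local path, hex>, 'foo',
    #                         'foo', <ancestor filenode hex>,
    #                         'foo', <other filenode hex>, '']
    # with the local file contents stashed under .hg/merge/<sha1>.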

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))

class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                        and repo.dirstate.normalize(p) not in repo.dirstate):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
                    if relf not in repo.dirstate:
                        return f
        return None
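
# Example (annotation added for this listing, not upstream commentary): if a
# merge wants to create the file 'a/b' but the working directory contains an
# untracked file named 'a', the prefix check above returns 'a'; conversely, if
# 'a/b' exists on disk as a directory holding untracked files, the second
# check returns 'a/b' itself.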

def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
886
886
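The per-file part of that check is easy to exercise on its own; a standalone sketch with str.lower() standing in for util.normcase (which handles more than ASCII):

def check_casefold_collisions(paths):
    foldmap = {}
    for f in paths:
        fold = f.lower()  # stand-in for util.normcase
        if fold in foldmap:
            raise ValueError("case-folding collision between %s and %s"
                             % (f, foldmap[fold]))
        foldmap[fold] = f
    return foldmap

check_casefold_collisions(['README', 'src/a.py'])   # fine
# check_casefold_collisions(['README', 'readme'])   # raises ValueError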
887 def driverpreprocess(repo, ms, wctx, labels=None):
887 def driverpreprocess(repo, ms, wctx, labels=None):
888 """run the preprocess step of the merge driver, if any
888 """run the preprocess step of the merge driver, if any
889
889
890 This is currently not implemented -- it's an extension point."""
890 This is currently not implemented -- it's an extension point."""
891 return True
891 return True
892
892
893 def driverconclude(repo, ms, wctx, labels=None):
893 def driverconclude(repo, ms, wctx, labels=None):
894 """run the conclude step of the merge driver, if any
894 """run the conclude step of the merge driver, if any
895
895
896 This is currently not implemented -- it's an extension point."""
896 This is currently not implemented -- it's an extension point."""
897 return True
897 return True
898
898
899 def _filesindirs(repo, manifest, dirs):
899 def _filesindirs(repo, manifest, dirs):
900 """
900 """
901 Generator that yields pairs of all the files in the manifest that are found
901 Generator that yields pairs of all the files in the manifest that are found
902 inside the directories listed in dirs, and which directory they are found
902 inside the directories listed in dirs, and which directory they are found
903 in.
903 in.
904 """
904 """
905 for f in manifest:
905 for f in manifest:
906 for p in util.finddirs(f):
906 for p in util.finddirs(f):
907 if p in dirs:
907 if p in dirs:
908 yield f, p
908 yield f, p
909 break
909 break
910
910
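A self-contained version, with a local helper doing what util.finddirs is assumed to do (yield each ancestor directory of a slash-separated path):

def _ancestordirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def filesindirs_sketch(manifest, dirs):
    for f in manifest:
        for p in _ancestordirs(f):
            if p in dirs:
                yield f, p
                break

# list(filesindirs_sketch(['a/b/c', 'x/y'], {'a'})) == [('a/b/c', 'a')]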
911 def checkpathconflicts(repo, wctx, mctx, actions):
911 def checkpathconflicts(repo, wctx, mctx, actions):
912 """
912 """
913 Check if any actions introduce path conflicts in the repository, updating
913 Check if any actions introduce path conflicts in the repository, updating
914 actions to record or handle the path conflict accordingly.
914 actions to record or handle the path conflict accordingly.
915 """
915 """
916 mf = wctx.manifest()
916 mf = wctx.manifest()
917
917
918 # The set of local files that conflict with a remote directory.
918 # The set of local files that conflict with a remote directory.
919 localconflicts = set()
919 localconflicts = set()
920
920
921 # The set of directories that conflict with a remote file, and so may cause
921 # The set of directories that conflict with a remote file, and so may cause
922 # conflicts if they still contain any files after the merge.
922 # conflicts if they still contain any files after the merge.
923 remoteconflicts = set()
923 remoteconflicts = set()
924
924
925 # The set of directories that appear as both a file and a directory in the
925 # The set of directories that appear as both a file and a directory in the
926 # remote manifest. These indicate an invalid remote manifest, which
926 # remote manifest. These indicate an invalid remote manifest, which
927 # can't be updated to cleanly.
927 # can't be updated to cleanly.
928 invalidconflicts = set()
928 invalidconflicts = set()
929
929
930 # The set of directories that contain files that are being created.
930 # The set of directories that contain files that are being created.
931 createdfiledirs = set()
931 createdfiledirs = set()
932
932
933 # The set of files deleted by all the actions.
933 # The set of files deleted by all the actions.
934 deletedfiles = set()
934 deletedfiles = set()
935
935
936 for f, (m, args, msg) in actions.items():
936 for f, (m, args, msg) in actions.items():
937 if m in ('c', 'dc', 'm', 'cm'):
937 if m in ('c', 'dc', 'm', 'cm'):
938 # This action may create a new local file.
938 # This action may create a new local file.
939 createdfiledirs.update(util.finddirs(f))
939 createdfiledirs.update(util.finddirs(f))
940 if mf.hasdir(f):
940 if mf.hasdir(f):
941 # The file aliases a local directory. This might be ok if all
941 # The file aliases a local directory. This might be ok if all
942 # the files in the local directory are being deleted. This
942 # the files in the local directory are being deleted. This
943 # will be checked once we know what all the deleted files are.
943 # will be checked once we know what all the deleted files are.
944 remoteconflicts.add(f)
944 remoteconflicts.add(f)
945 # Track the names of all deleted files.
945 # Track the names of all deleted files.
946 if m == 'r':
946 if m == 'r':
947 deletedfiles.add(f)
947 deletedfiles.add(f)
948 if m == 'm':
948 if m == 'm':
949 f1, f2, fa, move, anc = args
949 f1, f2, fa, move, anc = args
950 if move:
950 if move:
951 deletedfiles.add(f1)
951 deletedfiles.add(f1)
952 if m == 'dm':
952 if m == 'dm':
953 f2, flags = args
953 f2, flags = args
954 deletedfiles.add(f2)
954 deletedfiles.add(f2)
955
955
956 # Check all directories that contain created files for path conflicts.
956 # Check all directories that contain created files for path conflicts.
957 for p in createdfiledirs:
957 for p in createdfiledirs:
958 if p in mf:
958 if p in mf:
959 if p in mctx:
959 if p in mctx:
960 # A file is in a directory which aliases both a local
960 # A file is in a directory which aliases both a local
961 # and a remote file. This is an internal inconsistency
961 # and a remote file. This is an internal inconsistency
962 # within the remote manifest.
962 # within the remote manifest.
963 invalidconflicts.add(p)
963 invalidconflicts.add(p)
964 else:
964 else:
965 # A file is in a directory which aliases a local file.
965 # A file is in a directory which aliases a local file.
966 # We will need to rename the local file.
966 # We will need to rename the local file.
967 localconflicts.add(p)
967 localconflicts.add(p)
968 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
968 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
969 # The file is in a directory which aliases a remote file.
969 # The file is in a directory which aliases a remote file.
970 # This is an internal inconsistency within the remote
970 # This is an internal inconsistency within the remote
971 # manifest.
971 # manifest.
972 invalidconflicts.add(p)
972 invalidconflicts.add(p)
973
973
974 # Rename all local conflicting files that have not been deleted.
974 # Rename all local conflicting files that have not been deleted.
975 for p in localconflicts:
975 for p in localconflicts:
976 if p not in deletedfiles:
976 if p not in deletedfiles:
977 ctxname = str(wctx).rstrip('+')
977 ctxname = str(wctx).rstrip('+')
978 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
978 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
979 actions[pnew] = ('pr', (p,), "local path conflict")
979 actions[pnew] = ('pr', (p,), "local path conflict")
980 actions[p] = ('p', (pnew, 'l'), "path conflict")
980 actions[p] = ('p', (pnew, 'l'), "path conflict")
981
981
982 if remoteconflicts:
982 if remoteconflicts:
983 # Check if all files in the conflicting directories have been removed.
983 # Check if all files in the conflicting directories have been removed.
984 ctxname = str(mctx).rstrip('+')
984 ctxname = str(mctx).rstrip('+')
985 for f, p in _filesindirs(repo, mf, remoteconflicts):
985 for f, p in _filesindirs(repo, mf, remoteconflicts):
986 if f not in deletedfiles:
986 if f not in deletedfiles:
987 m, args, msg = actions[p]
987 m, args, msg = actions[p]
988 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
988 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
989 if m in ('dc', 'm'):
989 if m in ('dc', 'm'):
990 # Action was merge, just update target.
990 # Action was merge, just update target.
991 actions[pnew] = (m, args, msg)
991 actions[pnew] = (m, args, msg)
992 else:
992 else:
993 # Action was create, change to renamed get action.
993 # Action was create, change to renamed get action.
994 fl = args[0]
994 fl = args[0]
995 actions[pnew] = ('dg', (p, fl), "remote path conflict")
995 actions[pnew] = ('dg', (p, fl), "remote path conflict")
996 actions[p] = ('p', (pnew, 'r'), "path conflict")
996 actions[p] = ('p', (pnew, 'r'), "path conflict")
997 remoteconflicts.remove(p)
997 remoteconflicts.remove(p)
998 break
998 break
999
999
1000 if invalidconflicts:
1000 if invalidconflicts:
1001 for p in invalidconflicts:
1001 for p in invalidconflicts:
1002 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1002 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1003 raise error.Abort(_("destination manifest contains path conflicts"))
1003 raise error.Abort(_("destination manifest contains path conflicts"))
1004
1004
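A rough, repository-free illustration of the local-conflict case above: a file the merge creates whose parent directory name is currently taken by a local file (manifest membership and hasdir are approximated with plain sets; the remote and invalid cases are omitted):

def local_path_conflicts_sketch(created, local_files, deleted):
    def parentdirs(path):
        parts = path.split('/')[:-1]
        return {'/'.join(parts[:i + 1]) for i in range(len(parts))}

    conflicts = set()
    for f in created:
        for d in parentdirs(f):
            if d in local_files and d not in deleted:
                conflicts.add(d)  # this local file would need renaming aside
    return conflicts

# local_path_conflicts_sketch({'a/b'}, {'a', 'c'}, set()) == {'a'}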
1005 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1005 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1006 acceptremote, followcopies, forcefulldiff=False):
1006 acceptremote, followcopies, forcefulldiff=False):
1007 """
1007 """
1008 Merge wctx and p2 with ancestor pa and generate merge action list
1008 Merge wctx and p2 with ancestor pa and generate merge action list
1009
1009
1010 branchmerge and force are as passed in to update
1010 branchmerge and force are as passed in to update
1011 matcher = matcher to filter file lists
1011 matcher = matcher to filter file lists
1012 acceptremote = accept the incoming changes without prompting
1012 acceptremote = accept the incoming changes without prompting
1013 """
1013 """
1014 if matcher is not None and matcher.always():
1014 if matcher is not None and matcher.always():
1015 matcher = None
1015 matcher = None
1016
1016
1017 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1017 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1018
1018
1019 # manifests fetched in order are going to be faster, so prime the caches
1019 # manifests fetched in order are going to be faster, so prime the caches
1020 [x.manifest() for x in
1020 [x.manifest() for x in
1021 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1021 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1022
1022
1023 if followcopies:
1023 if followcopies:
1024 ret = copies.mergecopies(repo, wctx, p2, pa)
1024 ret = copies.mergecopies(repo, wctx, p2, pa)
1025 copy, movewithdir, diverge, renamedelete, dirmove = ret
1025 copy, movewithdir, diverge, renamedelete, dirmove = ret
1026
1026
1027 boolbm = pycompat.bytestr(bool(branchmerge))
1027 boolbm = pycompat.bytestr(bool(branchmerge))
1028 boolf = pycompat.bytestr(bool(force))
1028 boolf = pycompat.bytestr(bool(force))
1029 boolm = pycompat.bytestr(bool(matcher))
1029 boolm = pycompat.bytestr(bool(matcher))
1030 repo.ui.note(_("resolving manifests\n"))
1030 repo.ui.note(_("resolving manifests\n"))
1031 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1031 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1032 % (boolbm, boolf, boolm))
1032 % (boolbm, boolf, boolm))
1033 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1033 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1034
1034
1035 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1035 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1036 copied = set(copy.values())
1036 copied = set(copy.values())
1037 copied.update(movewithdir.values())
1037 copied.update(movewithdir.values())
1038
1038
1039 if '.hgsubstate' in m1:
1039 if '.hgsubstate' in m1:
1040 # check whether sub state is modified
1040 # check whether sub state is modified
1041 if any(wctx.sub(s).dirty() for s in wctx.substate):
1041 if any(wctx.sub(s).dirty() for s in wctx.substate):
1042 m1['.hgsubstate'] = modifiednodeid
1042 m1['.hgsubstate'] = modifiednodeid
1043
1043
1044 # Don't use m2-vs-ma optimization if:
1044 # Don't use m2-vs-ma optimization if:
1045 # - ma is the same as m1 or m2, which we're just going to diff again later
1045 # - ma is the same as m1 or m2, which we're just going to diff again later
1046 # - The caller specifically asks for a full diff, which is useful during bid
1046 # - The caller specifically asks for a full diff, which is useful during bid
1047 # merge.
1047 # merge.
1048 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1048 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1049 # Identify which files are relevant to the merge, so we can limit the
1049 # Identify which files are relevant to the merge, so we can limit the
1050 # total m1-vs-m2 diff to just those files. This has significant
1050 # total m1-vs-m2 diff to just those files. This has significant
1051 # performance benefits in large repositories.
1051 # performance benefits in large repositories.
1052 relevantfiles = set(ma.diff(m2).keys())
1052 relevantfiles = set(ma.diff(m2).keys())
1053
1053
1054 # For copied and moved files, we need to add the source file too.
1054 # For copied and moved files, we need to add the source file too.
1055 for copykey, copyvalue in copy.iteritems():
1055 for copykey, copyvalue in copy.iteritems():
1056 if copyvalue in relevantfiles:
1056 if copyvalue in relevantfiles:
1057 relevantfiles.add(copykey)
1057 relevantfiles.add(copykey)
1058 for movedirkey in movewithdir:
1058 for movedirkey in movewithdir:
1059 relevantfiles.add(movedirkey)
1059 relevantfiles.add(movedirkey)
1060 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1060 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1061 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1061 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1062
1062
1063 diff = m1.diff(m2, match=matcher)
1063 diff = m1.diff(m2, match=matcher)
1064
1064
1065 if matcher is None:
1065 if matcher is None:
1066 matcher = matchmod.always('', '')
1066 matcher = matchmod.always('', '')
1067
1067
1068 actions = {}
1068 actions = {}
1069 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1069 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1070 if n1 and n2: # file exists on both local and remote side
1070 if n1 and n2: # file exists on both local and remote side
1071 if f not in ma:
1071 if f not in ma:
1072 fa = copy.get(f, None)
1072 fa = copy.get(f, None)
1073 if fa is not None:
1073 if fa is not None:
1074 actions[f] = ('m', (f, f, fa, False, pa.node()),
1074 actions[f] = ('m', (f, f, fa, False, pa.node()),
1075 "both renamed from " + fa)
1075 "both renamed from " + fa)
1076 else:
1076 else:
1077 actions[f] = ('m', (f, f, None, False, pa.node()),
1077 actions[f] = ('m', (f, f, None, False, pa.node()),
1078 "both created")
1078 "both created")
1079 else:
1079 else:
1080 a = ma[f]
1080 a = ma[f]
1081 fla = ma.flags(f)
1081 fla = ma.flags(f)
1082 nol = 'l' not in fl1 + fl2 + fla
1082 nol = 'l' not in fl1 + fl2 + fla
1083 if n2 == a and fl2 == fla:
1083 if n2 == a and fl2 == fla:
1084 actions[f] = ('k', (), "remote unchanged")
1084 actions[f] = ('k', (), "remote unchanged")
1085 elif n1 == a and fl1 == fla: # local unchanged - use remote
1085 elif n1 == a and fl1 == fla: # local unchanged - use remote
1086 if n1 == n2: # optimization: keep local content
1086 if n1 == n2: # optimization: keep local content
1087 actions[f] = ('e', (fl2,), "update permissions")
1087 actions[f] = ('e', (fl2,), "update permissions")
1088 else:
1088 else:
1089 actions[f] = ('g', (fl2, False), "remote is newer")
1089 actions[f] = ('g', (fl2, False), "remote is newer")
1090 elif nol and n2 == a: # remote only changed 'x'
1090 elif nol and n2 == a: # remote only changed 'x'
1091 actions[f] = ('e', (fl2,), "update permissions")
1091 actions[f] = ('e', (fl2,), "update permissions")
1092 elif nol and n1 == a: # local only changed 'x'
1092 elif nol and n1 == a: # local only changed 'x'
1093 actions[f] = ('g', (fl1, False), "remote is newer")
1093 actions[f] = ('g', (fl1, False), "remote is newer")
1094 else: # both changed something
1094 else: # both changed something
1095 actions[f] = ('m', (f, f, f, False, pa.node()),
1095 actions[f] = ('m', (f, f, f, False, pa.node()),
1096 "versions differ")
1096 "versions differ")
1097 elif n1: # file exists only on local side
1097 elif n1: # file exists only on local side
1098 if f in copied:
1098 if f in copied:
1099 pass # we'll deal with it on m2 side
1099 pass # we'll deal with it on m2 side
1100 elif f in movewithdir: # directory rename, move local
1100 elif f in movewithdir: # directory rename, move local
1101 f2 = movewithdir[f]
1101 f2 = movewithdir[f]
1102 if f2 in m2:
1102 if f2 in m2:
1103 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1103 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1104 "remote directory rename, both created")
1104 "remote directory rename, both created")
1105 else:
1105 else:
1106 actions[f2] = ('dm', (f, fl1),
1106 actions[f2] = ('dm', (f, fl1),
1107 "remote directory rename - move from " + f)
1107 "remote directory rename - move from " + f)
1108 elif f in copy:
1108 elif f in copy:
1109 f2 = copy[f]
1109 f2 = copy[f]
1110 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1110 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1111 "local copied/moved from " + f2)
1111 "local copied/moved from " + f2)
1112 elif f in ma: # clean, a different, no remote
1112 elif f in ma: # clean, a different, no remote
1113 if n1 != ma[f]:
1113 if n1 != ma[f]:
1114 if acceptremote:
1114 if acceptremote:
1115 actions[f] = ('r', None, "remote delete")
1115 actions[f] = ('r', None, "remote delete")
1116 else:
1116 else:
1117 actions[f] = ('cd', (f, None, f, False, pa.node()),
1117 actions[f] = ('cd', (f, None, f, False, pa.node()),
1118 "prompt changed/deleted")
1118 "prompt changed/deleted")
1119 elif n1 == addednodeid:
1119 elif n1 == addednodeid:
1120 # This extra 'a' is added by the working copy manifest to mark
1120 # This extra 'a' is added by the working copy manifest to mark

1121 # the file as locally added. We should forget it instead of
1121 # the file as locally added. We should forget it instead of
1122 # deleting it.
1122 # deleting it.
1123 actions[f] = ('f', None, "remote deleted")
1123 actions[f] = ('f', None, "remote deleted")
1124 else:
1124 else:
1125 actions[f] = ('r', None, "other deleted")
1125 actions[f] = ('r', None, "other deleted")
1126 elif n2: # file exists only on remote side
1126 elif n2: # file exists only on remote side
1127 if f in copied:
1127 if f in copied:
1128 pass # we'll deal with it on m1 side
1128 pass # we'll deal with it on m1 side
1129 elif f in movewithdir:
1129 elif f in movewithdir:
1130 f2 = movewithdir[f]
1130 f2 = movewithdir[f]
1131 if f2 in m1:
1131 if f2 in m1:
1132 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1132 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1133 "local directory rename, both created")
1133 "local directory rename, both created")
1134 else:
1134 else:
1135 actions[f2] = ('dg', (f, fl2),
1135 actions[f2] = ('dg', (f, fl2),
1136 "local directory rename - get from " + f)
1136 "local directory rename - get from " + f)
1137 elif f in copy:
1137 elif f in copy:
1138 f2 = copy[f]
1138 f2 = copy[f]
1139 if f2 in m2:
1139 if f2 in m2:
1140 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1140 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1141 "remote copied from " + f2)
1141 "remote copied from " + f2)
1142 else:
1142 else:
1143 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1143 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1144 "remote moved from " + f2)
1144 "remote moved from " + f2)
1145 elif f not in ma:
1145 elif f not in ma:
1146 # local unknown, remote created: the logic is described by the
1146 # local unknown, remote created: the logic is described by the
1147 # following table:
1147 # following table:
1148 #
1148 #
1149 # force branchmerge different | action
1149 # force branchmerge different | action
1150 # n * * | create
1150 # n * * | create
1151 # y n * | create
1151 # y n * | create
1152 # y y n | create
1152 # y y n | create
1153 # y y y | merge
1153 # y y y | merge
1154 #
1154 #
1155 # Checking whether the files are different is expensive, so we
1155 # Checking whether the files are different is expensive, so we
1156 # don't do that when we can avoid it.
1156 # don't do that when we can avoid it.
1157 if not force:
1157 if not force:
1158 actions[f] = ('c', (fl2,), "remote created")
1158 actions[f] = ('c', (fl2,), "remote created")
1159 elif not branchmerge:
1159 elif not branchmerge:
1160 actions[f] = ('c', (fl2,), "remote created")
1160 actions[f] = ('c', (fl2,), "remote created")
1161 else:
1161 else:
1162 actions[f] = ('cm', (fl2, pa.node()),
1162 actions[f] = ('cm', (fl2, pa.node()),
1163 "remote created, get or merge")
1163 "remote created, get or merge")
1164 elif n2 != ma[f]:
1164 elif n2 != ma[f]:
1165 df = None
1165 df = None
1166 for d in dirmove:
1166 for d in dirmove:
1167 if f.startswith(d):
1167 if f.startswith(d):
1168 # new file added in a directory that was moved
1168 # new file added in a directory that was moved
1169 df = dirmove[d] + f[len(d):]
1169 df = dirmove[d] + f[len(d):]
1170 break
1170 break
1171 if df is not None and df in m1:
1171 if df is not None and df in m1:
1172 actions[df] = ('m', (df, f, f, False, pa.node()),
1172 actions[df] = ('m', (df, f, f, False, pa.node()),
1173 "local directory rename - respect move from " + f)
1173 "local directory rename - respect move from " + f)
1174 elif acceptremote:
1174 elif acceptremote:
1175 actions[f] = ('c', (fl2,), "remote recreating")
1175 actions[f] = ('c', (fl2,), "remote recreating")
1176 else:
1176 else:
1177 actions[f] = ('dc', (None, f, f, False, pa.node()),
1177 actions[f] = ('dc', (None, f, f, False, pa.node()),
1178 "prompt deleted/changed")
1178 "prompt deleted/changed")
1179
1179
1180 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1180 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1181 # If we are merging, look for path conflicts.
1181 # If we are merging, look for path conflicts.
1182 checkpathconflicts(repo, wctx, p2, actions)
1182 checkpathconflicts(repo, wctx, p2, actions)
1183
1183
1184 return actions, diverge, renamedelete
1184 return actions, diverge, renamedelete
1185
1185
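The "local unknown, remote created" decision table inside manifestmerge reduces to a one-line predicate; an illustrative encoding (the real code defers the expensive "different" check by emitting a 'cm' action instead of computing it up front):

def unknown_remote_created(force, branchmerge, different):
    # merge only when all three columns are 'y'; every other row creates
    return 'merge' if (force and branchmerge and different) else 'create'

assert unknown_remote_created(False, True, True) == 'create'
assert unknown_remote_created(True, True, False) == 'create'
assert unknown_remote_created(True, True, True) == 'merge'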
1186 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1186 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1187 """Resolves false conflicts where the nodeid changed but the content
1187 """Resolves false conflicts where the nodeid changed but the content
1188 remained the same."""
1188 remained the same."""
1189
1189
1190 for f, (m, args, msg) in actions.items():
1190 for f, (m, args, msg) in actions.items():
1191 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1191 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1192 # local did change but ended up with same content
1192 # local did change but ended up with same content
1193 actions[f] = 'r', None, "prompt same"
1193 actions[f] = 'r', None, "prompt same"
1194 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1194 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1195 # remote did change but ended up with same content
1195 # remote did change but ended up with same content
1196 del actions[f] # don't get = keep local deleted
1196 del actions[f] # don't get = keep local deleted
1197
1197
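The same pruning with plain dicts of file contents standing in for the contexts (string equality replaces fctx.cmp(); purely illustrative):

def resolvetrivial_sketch(actions, wctx, mctx, ancestor):
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and wctx.get(f) == ancestor[f]:
            actions[f] = ('r', None, "prompt same")   # local edit was a no-op
        elif m == 'dc' and f in ancestor and mctx.get(f) == ancestor[f]:
            del actions[f]                            # remote edit was a no-op
    return actions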
1198 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1198 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1199 acceptremote, followcopies, matcher=None,
1199 acceptremote, followcopies, matcher=None,
1200 mergeforce=False):
1200 mergeforce=False):
1201 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1201 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1202 # Avoid cycle.
1202 # Avoid cycle.
1203 from . import sparse
1203 from . import sparse
1204
1204
1205 if len(ancestors) == 1: # default
1205 if len(ancestors) == 1: # default
1206 actions, diverge, renamedelete = manifestmerge(
1206 actions, diverge, renamedelete = manifestmerge(
1207 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1207 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1208 acceptremote, followcopies)
1208 acceptremote, followcopies)
1209 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1209 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1210
1210
1211 else: # only when merge.preferancestor=* - the default
1211 else: # only when merge.preferancestor=* - the default
1212 repo.ui.note(
1212 repo.ui.note(
1213 _("note: merging %s and %s using bids from ancestors %s\n") %
1213 _("note: merging %s and %s using bids from ancestors %s\n") %
1214 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1214 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1215 for anc in ancestors)))
1215 for anc in ancestors)))
1216
1216
1217 # Call for bids
1217 # Call for bids
1218 fbids = {} # mapping filename to bids (action method to list of actions)
1218 fbids = {} # mapping filename to bids (action method to list of actions)
1219 diverge, renamedelete = None, None
1219 diverge, renamedelete = None, None
1220 for ancestor in ancestors:
1220 for ancestor in ancestors:
1221 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1221 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1222 actions, diverge1, renamedelete1 = manifestmerge(
1222 actions, diverge1, renamedelete1 = manifestmerge(
1223 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1223 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1224 acceptremote, followcopies, forcefulldiff=True)
1224 acceptremote, followcopies, forcefulldiff=True)
1225 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1225 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1226
1226
1227 # Track the shortest set of warnings on the theory that bid
1227 # Track the shortest set of warnings on the theory that bid
1228 # merge will correctly incorporate more information
1228 # merge will correctly incorporate more information
1229 if diverge is None or len(diverge1) < len(diverge):
1229 if diverge is None or len(diverge1) < len(diverge):
1230 diverge = diverge1
1230 diverge = diverge1
1231 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1231 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1232 renamedelete = renamedelete1
1232 renamedelete = renamedelete1
1233
1233
1234 for f, a in sorted(actions.iteritems()):
1234 for f, a in sorted(actions.iteritems()):
1235 m, args, msg = a
1235 m, args, msg = a
1236 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1236 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1237 if f in fbids:
1237 if f in fbids:
1238 d = fbids[f]
1238 d = fbids[f]
1239 if m in d:
1239 if m in d:
1240 d[m].append(a)
1240 d[m].append(a)
1241 else:
1241 else:
1242 d[m] = [a]
1242 d[m] = [a]
1243 else:
1243 else:
1244 fbids[f] = {m: [a]}
1244 fbids[f] = {m: [a]}
1245
1245
1246 # Pick the best bid for each file
1246 # Pick the best bid for each file
1247 repo.ui.note(_('\nauction for merging merge bids\n'))
1247 repo.ui.note(_('\nauction for merging merge bids\n'))
1248 actions = {}
1248 actions = {}
1249 dms = [] # filenames that have dm actions
1249 dms = [] # filenames that have dm actions
1250 for f, bids in sorted(fbids.items()):
1250 for f, bids in sorted(fbids.items()):
1251 # bids is a mapping from action method to list of actions
1251 # bids is a mapping from action method to list of actions
1252 # Consensus?
1252 # Consensus?
1253 if len(bids) == 1: # all bids are the same kind of method
1253 if len(bids) == 1: # all bids are the same kind of method
1254 m, l = list(bids.items())[0]
1254 m, l = list(bids.items())[0]
1255 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1255 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1256 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1256 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1257 actions[f] = l[0]
1257 actions[f] = l[0]
1258 if m == 'dm':
1258 if m == 'dm':
1259 dms.append(f)
1259 dms.append(f)
1260 continue
1260 continue
1261 # If keep is an option, just do it.
1261 # If keep is an option, just do it.
1262 if 'k' in bids:
1262 if 'k' in bids:
1263 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1263 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1264 actions[f] = bids['k'][0]
1264 actions[f] = bids['k'][0]
1265 continue
1265 continue
1266 # If there are gets and they all agree [how could they not?], do it.
1266 # If there are gets and they all agree [how could they not?], do it.
1267 if 'g' in bids:
1267 if 'g' in bids:
1268 ga0 = bids['g'][0]
1268 ga0 = bids['g'][0]
1269 if all(a == ga0 for a in bids['g'][1:]):
1269 if all(a == ga0 for a in bids['g'][1:]):
1270 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1270 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1271 actions[f] = ga0
1271 actions[f] = ga0
1272 continue
1272 continue
1273 # TODO: Consider other simple actions such as mode changes
1273 # TODO: Consider other simple actions such as mode changes
1274 # Handle inefficient democrazy.
1274 # Handle inefficient democrazy.
1275 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1275 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1276 for m, l in sorted(bids.items()):
1276 for m, l in sorted(bids.items()):
1277 for _f, args, msg in l:
1277 for _f, args, msg in l:
1278 repo.ui.note(' %s -> %s\n' % (msg, m))
1278 repo.ui.note(' %s -> %s\n' % (msg, m))
1279 # Pick random action. TODO: Instead, prompt user when resolving
1279 # Pick random action. TODO: Instead, prompt user when resolving
1280 m, l = list(bids.items())[0]
1280 m, l = list(bids.items())[0]
1281 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1281 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1282 (f, m))
1282 (f, m))
1283 actions[f] = l[0]
1283 actions[f] = l[0]
1284 if m == 'dm':
1284 if m == 'dm':
1285 dms.append(f)
1285 dms.append(f)
1286 continue
1286 continue
1287 # Work around 'dm' that can cause multiple actions for the same file
1287 # Work around 'dm' that can cause multiple actions for the same file
1288 for f in dms:
1288 for f in dms:
1289 dm, (f0, flags), msg = actions[f]
1289 dm, (f0, flags), msg = actions[f]
1290 assert dm == 'dm', dm
1290 assert dm == 'dm', dm
1291 if f0 in actions and actions[f0][0] == 'r':
1291 if f0 in actions and actions[f0][0] == 'r':
1292 # We have one bid for removing a file and another for moving it.
1292 # We have one bid for removing a file and another for moving it.
1293 # These two could be merged as first move and then delete ...
1293 # These two could be merged as first move and then delete ...
1294 # but instead drop moving and just delete.
1294 # but instead drop moving and just delete.
1295 del actions[f]
1295 del actions[f]
1296 repo.ui.note(_('end of auction\n\n'))
1296 repo.ui.note(_('end of auction\n\n'))
1297
1297
1298 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1298 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1299
1299
1300 if wctx.rev() is None:
1300 if wctx.rev() is None:
1301 fractions = _forgetremoved(wctx, mctx, branchmerge)
1301 fractions = _forgetremoved(wctx, mctx, branchmerge)
1302 actions.update(fractions)
1302 actions.update(fractions)
1303
1303
1304 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1304 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1305 actions)
1305 actions)
1306
1306
1307 return prunedactions, diverge, renamedelete
1307 return prunedactions, diverge, renamedelete
1308
1308
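The bid auction itself needs no repository; a simplified standalone version of the consensus / keep / get preferences (the 'dm' bookkeeping and user-facing notes are omitted):

def pick_bids_sketch(fbids):
    # fbids: {filename: {action_method: [action_tuple, ...]}}
    chosen = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                       # every ancestor bid the same method
            chosen[f] = list(bids.values())[0][0]
        elif 'k' in bids:                        # keeping local content is always safe
            chosen[f] = bids['k'][0]
        elif 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
            chosen[f] = bids['g'][0]             # all 'get' bids agree
        else:                                    # ambiguous: fall back to an arbitrary bid
            chosen[f] = list(bids.values())[0][0]
    return chosen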
1309 def _getcwd():
1309 def _getcwd():
1310 try:
1310 try:
1311 return pycompat.getcwd()
1311 return pycompat.getcwd()
1312 except OSError as err:
1312 except OSError as err:
1313 if err.errno == errno.ENOENT:
1313 if err.errno == errno.ENOENT:
1314 return None
1314 return None
1315 raise
1315 raise
1316
1316
1317 def batchremove(repo, wctx, actions):
1317 def batchremove(repo, wctx, actions):
1318 """apply removes to the working directory
1318 """apply removes to the working directory
1319
1319
1320 yields tuples for progress updates
1320 yields tuples for progress updates
1321 """
1321 """
1322 verbose = repo.ui.verbose
1322 verbose = repo.ui.verbose
1323 cwd = _getcwd()
1323 cwd = _getcwd()
1324 i = 0
1324 i = 0
1325 for f, args, msg in actions:
1325 for f, args, msg in actions:
1326 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1326 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1327 if verbose:
1327 if verbose:
1328 repo.ui.note(_("removing %s\n") % f)
1328 repo.ui.note(_("removing %s\n") % f)
1329 wctx[f].audit()
1329 wctx[f].audit()
1330 try:
1330 try:
1331 wctx[f].remove(ignoremissing=True)
1331 wctx[f].remove(ignoremissing=True)
1332 except OSError as inst:
1332 except OSError as inst:
1333 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1333 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1334 (f, inst.strerror))
1334 (f, inst.strerror))
1335 if i == 100:
1335 if i == 100:
1336 yield i, f
1336 yield i, f
1337 i = 0
1337 i = 0
1338 i += 1
1338 i += 1
1339 if i > 0:
1339 if i > 0:
1340 yield i, f
1340 yield i, f
1341
1341
1342 if cwd and not _getcwd():
1342 if cwd and not _getcwd():
1343 # cwd was removed in the course of removing files; print a helpful
1343 # cwd was removed in the course of removing files; print a helpful
1344 # warning.
1344 # warning.
1345 repo.ui.warn(_("current directory was removed\n"
1345 repo.ui.warn(_("current directory was removed\n"
1346 "(consider changing to repo root: %s)\n") % repo.root)
1346 "(consider changing to repo root: %s)\n") % repo.root)
1347
1347
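batchremove (and batchget below) use the same yield-every-100 progress idiom; in isolation the pattern looks like this generic sketch:

def process_in_batches(items, handle, batchsize=100):
    i = 0
    for item in items:
        handle(item)
        if i == batchsize:
            yield i, item   # flush a full batch to the progress bar
            i = 0
        i += 1
    if i > 0:
        yield i, item       # flush whatever is left at the end

Callers add the yielded counts to a running total, exactly as applyupdates does with the tuples coming back from the worker.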
1348 def batchget(repo, mctx, wctx, actions):
1348 def batchget(repo, mctx, wctx, actions):
1349 """apply gets to the working directory
1349 """apply gets to the working directory
1350
1350
1351 mctx is the context to get from
1351 mctx is the context to get from
1352
1352
1353 yields tuples for progress updates
1353 yields tuples for progress updates
1354 """
1354 """
1355 verbose = repo.ui.verbose
1355 verbose = repo.ui.verbose
1356 fctx = mctx.filectx
1356 fctx = mctx.filectx
1357 ui = repo.ui
1357 ui = repo.ui
1358 i = 0
1358 i = 0
1359 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1359 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1360 for f, (flags, backup), msg in actions:
1360 for f, (flags, backup), msg in actions:
1361 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1361 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1362 if verbose:
1362 if verbose:
1363 repo.ui.note(_("getting %s\n") % f)
1363 repo.ui.note(_("getting %s\n") % f)
1364
1364
1365 if backup:
1365 if backup:
1366 # If a file or directory exists with the same name, back that
1366 # If a file or directory exists with the same name, back that
1367 # up. Otherwise, look to see if there is a file that conflicts
1367 # up. Otherwise, look to see if there is a file that conflicts
1368 # with a directory this file is in, and if so, back that up.
1368 # with a directory this file is in, and if so, back that up.
1369 absf = repo.wjoin(f)
1369 absf = repo.wjoin(f)
1370 if not repo.wvfs.lexists(f):
1370 if not repo.wvfs.lexists(f):
1371 for p in util.finddirs(f):
1371 for p in util.finddirs(f):
1372 if repo.wvfs.isfileorlink(p):
1372 if repo.wvfs.isfileorlink(p):
1373 absf = repo.wjoin(p)
1373 absf = repo.wjoin(p)
1374 break
1374 break
1375 orig = scmutil.origpath(ui, repo, absf)
1375 orig = scmutil.origpath(ui, repo, absf)
1376 if repo.wvfs.lexists(absf):
1376 if repo.wvfs.lexists(absf):
1377 util.rename(absf, orig)
1377 util.rename(absf, orig)
1378 wctx[f].clearunknown()
1378 wctx[f].clearunknown()
1379 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1379 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1380 if i == 100:
1380 if i == 100:
1381 yield i, f
1381 yield i, f
1382 i = 0
1382 i = 0
1383 i += 1
1383 i += 1
1384 if i > 0:
1384 if i > 0:
1385 yield i, f
1385 yield i, f
1386
1386
1387
1387
1388 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1388 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1389 """apply the merge action list to the working directory
1389 """apply the merge action list to the working directory
1390
1390
1391 wctx is the working copy context
1391 wctx is the working copy context
1392 mctx is the context to be merged into the working copy
1392 mctx is the context to be merged into the working copy
1393
1393
1394 Return a tuple of counts (updated, merged, removed, unresolved) that
1394 Return a tuple of counts (updated, merged, removed, unresolved) that
1395 describes how many files were affected by the update.
1395 describes how many files were affected by the update.
1396 """
1396 """
1397
1397
1398 updated, merged, removed = 0, 0, 0
1398 updated, merged, removed = 0, 0, 0
1399 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1399 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1400 moves = []
1400 moves = []
1401 for m, l in actions.items():
1401 for m, l in actions.items():
1402 l.sort()
1402 l.sort()
1403
1403
1404 # 'cd' and 'dc' actions are treated like other merge conflicts
1404 # 'cd' and 'dc' actions are treated like other merge conflicts
1405 mergeactions = sorted(actions['cd'])
1405 mergeactions = sorted(actions['cd'])
1406 mergeactions.extend(sorted(actions['dc']))
1406 mergeactions.extend(sorted(actions['dc']))
1407 mergeactions.extend(actions['m'])
1407 mergeactions.extend(actions['m'])
1408 for f, args, msg in mergeactions:
1408 for f, args, msg in mergeactions:
1409 f1, f2, fa, move, anc = args
1409 f1, f2, fa, move, anc = args
1410 if f == '.hgsubstate': # merged internally
1410 if f == '.hgsubstate': # merged internally
1411 continue
1411 continue
1412 if f1 is None:
1412 if f1 is None:
1413 fcl = filemerge.absentfilectx(wctx, fa)
1413 fcl = filemerge.absentfilectx(wctx, fa)
1414 else:
1414 else:
1415 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1415 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1416 fcl = wctx[f1]
1416 fcl = wctx[f1]
1417 if f2 is None:
1417 if f2 is None:
1418 fco = filemerge.absentfilectx(mctx, fa)
1418 fco = filemerge.absentfilectx(mctx, fa)
1419 else:
1419 else:
1420 fco = mctx[f2]
1420 fco = mctx[f2]
1421 actx = repo[anc]
1421 actx = repo[anc]
1422 if fa in actx:
1422 if fa in actx:
1423 fca = actx[fa]
1423 fca = actx[fa]
1424 else:
1424 else:
1425 # TODO: move to absentfilectx
1425 # TODO: move to absentfilectx
1426 fca = repo.filectx(f1, fileid=nullrev)
1426 fca = repo.filectx(f1, fileid=nullrev)
1427 ms.add(fcl, fco, fca, f)
1427 ms.add(fcl, fco, fca, f)
1428 if f1 != f and move:
1428 if f1 != f and move:
1429 moves.append(f1)
1429 moves.append(f1)
1430
1430
1431 _updating = _('updating')
1431 _updating = _('updating')
1432 _files = _('files')
1432 _files = _('files')
1433 progress = repo.ui.progress
1433 progress = repo.ui.progress
1434
1434
1435 # remove renamed files after safely stored
1435 # remove renamed files after safely stored
1436 for f in moves:
1436 for f in moves:
1437 if wctx[f].lexists():
1437 if wctx[f].lexists():
1438 repo.ui.debug("removing %s\n" % f)
1438 repo.ui.debug("removing %s\n" % f)
1439 wctx[f].audit()
1439 wctx[f].audit()
1440 wctx[f].remove()
1440 wctx[f].remove()
1441
1441
1442 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1442 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1443 z = 0
1443 z = 0
1444
1444
1445 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1445 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1446 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1446 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1447
1447
1448 # record path conflicts
1448 # record path conflicts
1449 for f, args, msg in actions['p']:
1449 for f, args, msg in actions['p']:
1450 f1, fo = args
1450 f1, fo = args
1451 s = repo.ui.status
1451 s = repo.ui.status
1452 s(_("%s: path conflict - a file or link has the same name as a "
1452 s(_("%s: path conflict - a file or link has the same name as a "
1453 "directory\n") % f)
1453 "directory\n") % f)
1454 if fo == 'l':
1454 if fo == 'l':
1455 s(_("the local file has been renamed to %s\n") % f1)
1455 s(_("the local file has been renamed to %s\n") % f1)
1456 else:
1456 else:
1457 s(_("the remote file has been renamed to %s\n") % f1)
1457 s(_("the remote file has been renamed to %s\n") % f1)
1458 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1458 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1459 ms.addpath(f, f1, fo)
1459 ms.addpath(f, f1, fo)
1460 z += 1
1460 z += 1
1461 progress(_updating, z, item=f, total=numupdates, unit=_files)
1461 progress(_updating, z, item=f, total=numupdates, unit=_files)
1462
1462
1463 # When merging in-memory, we can't support worker processes, so set the
1463 # When merging in-memory, we can't support worker processes, so set the
1464 # per-item cost at 0 in that case.
1464 # per-item cost at 0 in that case.
1465 cost = 0 if wctx.isinmemory() else 0.001
1465 cost = 0 if wctx.isinmemory() else 0.001
1466
1466
1467 # remove in parallel (must come before resolving path conflicts and getting)
1467 # remove in parallel (must come before resolving path conflicts and getting)
1468 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1468 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1469 actions['r'])
1469 actions['r'])
1470 for i, item in prog:
1470 for i, item in prog:
1471 z += i
1471 z += i
1472 progress(_updating, z, item=item, total=numupdates, unit=_files)
1472 progress(_updating, z, item=item, total=numupdates, unit=_files)
1473 removed = len(actions['r'])
1473 removed = len(actions['r'])
1474
1474
1475 # resolve path conflicts (must come before getting)
1475 # resolve path conflicts (must come before getting)
1476 for f, args, msg in actions['pr']:
1476 for f, args, msg in actions['pr']:
1477 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1477 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1478 f0, = args
1478 f0, = args
1479 if wctx[f0].lexists():
1479 if wctx[f0].lexists():
1480 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1480 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1481 wctx[f].audit()
1481 wctx[f].audit()
1482 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1482 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1483 wctx[f0].remove()
1483 wctx[f0].remove()
1484 z += 1
1484 z += 1
1485 progress(_updating, z, item=f, total=numupdates, unit=_files)
1485 progress(_updating, z, item=f, total=numupdates, unit=_files)
1486
1486
1487 # get in parallel
1487 # get in parallel
1488 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1488 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1489 actions['g'])
1489 actions['g'])
1490 for i, item in prog:
1490 for i, item in prog:
1491 z += i
1491 z += i
1492 progress(_updating, z, item=item, total=numupdates, unit=_files)
1492 progress(_updating, z, item=item, total=numupdates, unit=_files)
1493 updated = len(actions['g'])
1493 updated = len(actions['g'])
1494
1494
1495 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1495 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1496 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1496 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1497
1497
1498 # forget (manifest only, just log it) (must come first)
1498 # forget (manifest only, just log it) (must come first)
1499 for f, args, msg in actions['f']:
1499 for f, args, msg in actions['f']:
1500 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1500 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1501 z += 1
1501 z += 1
1502 progress(_updating, z, item=f, total=numupdates, unit=_files)
1502 progress(_updating, z, item=f, total=numupdates, unit=_files)
1503
1503
1504 # re-add (manifest only, just log it)
1504 # re-add (manifest only, just log it)
1505 for f, args, msg in actions['a']:
1505 for f, args, msg in actions['a']:
1506 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1506 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1507 z += 1
1507 z += 1
1508 progress(_updating, z, item=f, total=numupdates, unit=_files)
1508 progress(_updating, z, item=f, total=numupdates, unit=_files)
1509
1509
1510 # re-add/mark as modified (manifest only, just log it)
1510 # re-add/mark as modified (manifest only, just log it)
1511 for f, args, msg in actions['am']:
1511 for f, args, msg in actions['am']:
1512 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1512 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1513 z += 1
1513 z += 1
1514 progress(_updating, z, item=f, total=numupdates, unit=_files)
1514 progress(_updating, z, item=f, total=numupdates, unit=_files)
1515
1515
1516 # keep (noop, just log it)
1516 # keep (noop, just log it)
1517 for f, args, msg in actions['k']:
1517 for f, args, msg in actions['k']:
1518 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1518 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1519 # no progress
1519 # no progress
1520
1520
1521 # directory rename, move local
1521 # directory rename, move local
1522 for f, args, msg in actions['dm']:
1522 for f, args, msg in actions['dm']:
1523 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1523 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1524 z += 1
1524 z += 1
1525 progress(_updating, z, item=f, total=numupdates, unit=_files)
1525 progress(_updating, z, item=f, total=numupdates, unit=_files)
1526 f0, flags = args
1526 f0, flags = args
1527 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1527 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1528 wctx[f].audit()
1528 wctx[f].audit()
1529 wctx[f].write(wctx.filectx(f0).data(), flags)
1529 wctx[f].write(wctx.filectx(f0).data(), flags)
1530 wctx[f0].remove()
1530 wctx[f0].remove()
1531 updated += 1
1531 updated += 1
1532
1532
1533 # local directory rename, get
1533 # local directory rename, get
1534 for f, args, msg in actions['dg']:
1534 for f, args, msg in actions['dg']:
1535 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1535 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1536 z += 1
1536 z += 1
1537 progress(_updating, z, item=f, total=numupdates, unit=_files)
1537 progress(_updating, z, item=f, total=numupdates, unit=_files)
1538 f0, flags = args
1538 f0, flags = args
1539 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1539 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1540 wctx[f].write(mctx.filectx(f0).data(), flags)
1540 wctx[f].write(mctx.filectx(f0).data(), flags)
1541 updated += 1
1541 updated += 1
1542
1542
1543 # exec
1543 # exec
1544 for f, args, msg in actions['e']:
1544 for f, args, msg in actions['e']:
1545 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1545 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1546 z += 1
1546 z += 1
1547 progress(_updating, z, item=f, total=numupdates, unit=_files)
1547 progress(_updating, z, item=f, total=numupdates, unit=_files)
1548 flags, = args
1548 flags, = args
1549 wctx[f].audit()
1549 wctx[f].audit()
1550 wctx[f].setflags('l' in flags, 'x' in flags)
1550 wctx[f].setflags('l' in flags, 'x' in flags)
1551 updated += 1
1551 updated += 1
1552
1552
1553 # the ordering is important here -- ms.mergedriver will raise if the merge
1553 # the ordering is important here -- ms.mergedriver will raise if the merge
1554 # driver has changed, and we want to be able to bypass it when overwrite is
1554 # driver has changed, and we want to be able to bypass it when overwrite is
1555 # True
1555 # True
1556 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1556 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1557
1557
1558 if usemergedriver:
1558 if usemergedriver:
1559 if wctx.isinmemory():
1560 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1561 "support mergedriver")
1559 ms.commit()
1562 ms.commit()
1560 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1563 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1561 # the driver might leave some files unresolved
1564 # the driver might leave some files unresolved
1562 unresolvedf = set(ms.unresolved())
1565 unresolvedf = set(ms.unresolved())
1563 if not proceed:
1566 if not proceed:
1564 # XXX setting unresolved to at least 1 is a hack to make sure we
1567 # XXX setting unresolved to at least 1 is a hack to make sure we
1565 # error out
1568 # error out
1566 return updated, merged, removed, max(len(unresolvedf), 1)
1569 return updated, merged, removed, max(len(unresolvedf), 1)
1567 newactions = []
1570 newactions = []
1568 for f, args, msg in mergeactions:
1571 for f, args, msg in mergeactions:
1569 if f in unresolvedf:
1572 if f in unresolvedf:
1570 newactions.append((f, args, msg))
1573 newactions.append((f, args, msg))
1571 mergeactions = newactions
1574 mergeactions = newactions
1572
1575
1573 try:
1576 try:
1574 # premerge
1577 # premerge
1575 tocomplete = []
1578 tocomplete = []
1576 for f, args, msg in mergeactions:
1579 for f, args, msg in mergeactions:
1577 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1580 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1578 z += 1
1581 z += 1
1579 progress(_updating, z, item=f, total=numupdates, unit=_files)
1582 progress(_updating, z, item=f, total=numupdates, unit=_files)
1580 if f == '.hgsubstate': # subrepo states need updating
1583 if f == '.hgsubstate': # subrepo states need updating
1581 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1584 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1582 overwrite, labels)
1585 overwrite, labels)
1583 continue
1586 continue
1584 wctx[f].audit()
1587 wctx[f].audit()
1585 complete, r = ms.preresolve(f, wctx)
1588 complete, r = ms.preresolve(f, wctx)
1586 if not complete:
1589 if not complete:
1587 numupdates += 1
1590 numupdates += 1
1588 tocomplete.append((f, args, msg))
1591 tocomplete.append((f, args, msg))
1589
1592
1590 # merge
1593 # merge
1591 for f, args, msg in tocomplete:
1594 for f, args, msg in tocomplete:
1592 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1595 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1593 z += 1
1596 z += 1
1594 progress(_updating, z, item=f, total=numupdates, unit=_files)
1597 progress(_updating, z, item=f, total=numupdates, unit=_files)
1595 ms.resolve(f, wctx)
1598 ms.resolve(f, wctx)
1596
1599
1597 finally:
1600 finally:
1598 ms.commit()
1601 ms.commit()
1599
1602
1600 unresolved = ms.unresolvedcount()
1603 unresolved = ms.unresolvedcount()
1601
1604
1602 if usemergedriver and not unresolved and ms.mdstate() != 's':
1605 if usemergedriver and not unresolved and ms.mdstate() != 's':
1603 if not driverconclude(repo, ms, wctx, labels=labels):
1606 if not driverconclude(repo, ms, wctx, labels=labels):
1604 # XXX setting unresolved to at least 1 is a hack to make sure we
1607 # XXX setting unresolved to at least 1 is a hack to make sure we
1605 # error out
1608 # error out
1606 unresolved = max(unresolved, 1)
1609 unresolved = max(unresolved, 1)
1607
1610
1608 ms.commit()
1611 ms.commit()
1609
1612
1610 msupdated, msmerged, msremoved = ms.counts()
1613 msupdated, msmerged, msremoved = ms.counts()
1611 updated += msupdated
1614 updated += msupdated
1612 merged += msmerged
1615 merged += msmerged
1613 removed += msremoved
1616 removed += msremoved
1614
1617
1615 extraactions = ms.actions()
1618 extraactions = ms.actions()
1616 if extraactions:
1619 if extraactions:
1617 mfiles = set(a[0] for a in actions['m'])
1620 mfiles = set(a[0] for a in actions['m'])
1618 for k, acts in extraactions.iteritems():
1621 for k, acts in extraactions.iteritems():
1619 actions[k].extend(acts)
1622 actions[k].extend(acts)
1620 # Remove these files from actions['m'] as well. This is important
1623 # Remove these files from actions['m'] as well. This is important
1621 # because in recordupdates, files in actions['m'] are processed
1624 # because in recordupdates, files in actions['m'] are processed
1622 # after files in other actions, and the merge driver might add
1625 # after files in other actions, and the merge driver might add
1623 # files to those actions via extraactions above. This can lead to a
1626 # files to those actions via extraactions above. This can lead to a
1624 # file being recorded twice, with poor results. This is especially
1627 # file being recorded twice, with poor results. This is especially
1625 # problematic for actions['r'] (currently only possible with the
1628 # problematic for actions['r'] (currently only possible with the
1626 # merge driver in the initial merge process; interrupted merges
1629 # merge driver in the initial merge process; interrupted merges
1627 # don't go through this flow).
1630 # don't go through this flow).
1628 #
1631 #
1629 # The real fix here is to have indexes by both file and action so
1632 # The real fix here is to have indexes by both file and action so
1630 # that when the action for a file is changed it is automatically
1633 # that when the action for a file is changed it is automatically
1631 # reflected in the other action lists. But that involves a more
1634 # reflected in the other action lists. But that involves a more
1632 # complex data structure, so this will do for now.
1635 # complex data structure, so this will do for now.
1633 #
1636 #
1634 # We don't need to do the same operation for 'dc' and 'cd' because
1637 # We don't need to do the same operation for 'dc' and 'cd' because
1635 # those lists aren't consulted again.
1638 # those lists aren't consulted again.
1636 mfiles.difference_update(a[0] for a in acts)
1639 mfiles.difference_update(a[0] for a in acts)
1637
1640
1638 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1641 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1639
1642
1640 progress(_updating, None, total=numupdates, unit=_files)
1643 progress(_updating, None, total=numupdates, unit=_files)
1641
1644
1642 return updated, merged, removed, unresolved
1645 return updated, merged, removed, unresolved
1643
1646
1644 def recordupdates(repo, actions, branchmerge):
1647 def recordupdates(repo, actions, branchmerge):
1645 "record merge actions to the dirstate"
1648 "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |   (1)
     y   *   y   *    *     *     *     |   (1)
     y   *   *   y    *     *     *     |   (1)
     *   y   y   *    *     *     *     |   (1)
     *   y   *   y    *     *     *     |   (1)
     *   *   y   y    *     *     *     |   (1)
     *   *   *   *    *     n     n     |    x
     *   *   *   *    n     *     *     |   ok
     n   n   n   n    y     *     y     |  merge
     n   n   n   n    y     y     n     |   (2)
     n   n   n   y    y     *     *     |  merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |   (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
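    # (For example, a caller doing a narrow checkout might pass something like
    # matcher=matchmod.match(repo.root, '', ['path:foo']) so that only files
    # under foo/ are touched; this is an illustrative sketch, not an exhaustive
    # description of the matcher API.)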
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # number of calls to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
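        # (Illustrative sketch of the two shapes: actionbyfile maps
        #  filename -> (action, args, message), e.g.
        #    {'foo': ('g', (flags, False), 'remote created')}
        #  while actions maps action -> [(filename, args, message), ...].)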
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial and not wc.isinmemory():
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow this on Linux and macOS because that's where fsmonitor
        # is considered stable.
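        # For example, a user can silence this advice via their hgrc
        # (illustrative snippet):
        #   [fsmonitor]
        #   warn_when_unused = false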
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial and not wc.isinmemory():
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats