##// END OF EJS Templates
merge: removed sorting in casefolding detection, for a slight performance win...
Alex Gaynor -
r33807:055fee35 default
parent child Browse files
Show More
@@ -1,1760 +1,1760 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from . import (
25 from . import (
26 copies,
26 copies,
27 error,
27 error,
28 filemerge,
28 filemerge,
29 match as matchmod,
29 match as matchmod,
30 obsutil,
30 obsutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepo,
33 subrepo,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41 def _droponode(data):
41 def _droponode(data):
42 # used for compatibility for v1
42 # used for compatibility for v1
43 bits = data.split('\0')
43 bits = data.split('\0')
44 bits = bits[:-2] + bits[-1:]
44 bits = bits[:-2] + bits[-1:]
45 return '\0'.join(bits)
45 return '\0'.join(bits)
46
46
47 class mergestate(object):
47 class mergestate(object):
48 '''track 3-way merge state of individual files
48 '''track 3-way merge state of individual files
49
49
50 The merge state is stored on disk when needed. Two files are used: one with
50 The merge state is stored on disk when needed. Two files are used: one with
51 an old format (version 1), and one with a new format (version 2). Version 2
51 an old format (version 1), and one with a new format (version 2). Version 2
52 stores a superset of the data in version 1, including new kinds of records
52 stores a superset of the data in version 1, including new kinds of records
53 in the future. For more about the new format, see the documentation for
53 in the future. For more about the new format, see the documentation for
54 `_readrecordsv2`.
54 `_readrecordsv2`.
55
55
56 Each record can contain arbitrary content, and has an associated type. This
56 Each record can contain arbitrary content, and has an associated type. This
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 versions of Mercurial that don't support it should abort. If `type` is
58 versions of Mercurial that don't support it should abort. If `type` is
59 lowercase, the record can be safely ignored.
59 lowercase, the record can be safely ignored.
60
60
61 Currently known records:
61 Currently known records:
62
62
63 L: the node of the "local" part of the merge (hexified version)
63 L: the node of the "local" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
65 F: a file to be merged entry
65 F: a file to be merged entry
66 C: a change/delete or delete/change conflict
66 C: a change/delete or delete/change conflict
67 D: a file that the external merge driver will merge internally
67 D: a file that the external merge driver will merge internally
68 (experimental)
68 (experimental)
69 m: the external merge driver defined for this merge plus its run state
69 m: the external merge driver defined for this merge plus its run state
70 (experimental)
70 (experimental)
71 f: a (filename, dictionary) tuple of optional values for a given file
71 f: a (filename, dictionary) tuple of optional values for a given file
72 X: unsupported mandatory record type (used in tests)
72 X: unsupported mandatory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
74 l: the labels for the parts of the merge.
74 l: the labels for the parts of the merge.
75
75
76 Merge driver run states (experimental):
76 Merge driver run states (experimental):
77 u: driver-resolved files unmarked -- needs to be run next time we're about
77 u: driver-resolved files unmarked -- needs to be run next time we're about
78 to resolve or commit
78 to resolve or commit
79 m: driver-resolved files marked -- only needs to be run before commit
79 m: driver-resolved files marked -- only needs to be run before commit
80 s: success/skipped -- does not need to be run any more
80 s: success/skipped -- does not need to be run any more
81
81
82 '''
82 '''
83 statepathv1 = 'merge/state'
83 statepathv1 = 'merge/state'
84 statepathv2 = 'merge/state2'
84 statepathv2 = 'merge/state2'
85
85
86 @staticmethod
86 @staticmethod
87 def clean(repo, node=None, other=None, labels=None):
87 def clean(repo, node=None, other=None, labels=None):
88 """Initialize a brand new merge state, removing any existing state on
88 """Initialize a brand new merge state, removing any existing state on
89 disk."""
89 disk."""
90 ms = mergestate(repo)
90 ms = mergestate(repo)
91 ms.reset(node, other, labels)
91 ms.reset(node, other, labels)
92 return ms
92 return ms
93
93
94 @staticmethod
94 @staticmethod
95 def read(repo):
95 def read(repo):
96 """Initialize the merge state, reading it from disk."""
96 """Initialize the merge state, reading it from disk."""
97 ms = mergestate(repo)
97 ms = mergestate(repo)
98 ms._read()
98 ms._read()
99 return ms
99 return ms
100
100
101 def __init__(self, repo):
101 def __init__(self, repo):
102 """Initialize the merge state.
102 """Initialize the merge state.
103
103
104 Do not use this directly! Instead call read() or clean()."""
104 Do not use this directly! Instead call read() or clean()."""
105 self._repo = repo
105 self._repo = repo
106 self._dirty = False
106 self._dirty = False
107 self._labels = None
107 self._labels = None
108
108
109 def reset(self, node=None, other=None, labels=None):
109 def reset(self, node=None, other=None, labels=None):
110 self._state = {}
110 self._state = {}
111 self._stateextras = {}
111 self._stateextras = {}
112 self._local = None
112 self._local = None
113 self._other = None
113 self._other = None
114 self._labels = labels
114 self._labels = labels
115 for var in ('localctx', 'otherctx'):
115 for var in ('localctx', 'otherctx'):
116 if var in vars(self):
116 if var in vars(self):
117 delattr(self, var)
117 delattr(self, var)
118 if node:
118 if node:
119 self._local = node
119 self._local = node
120 self._other = other
120 self._other = other
121 self._readmergedriver = None
121 self._readmergedriver = None
122 if self.mergedriver:
122 if self.mergedriver:
123 self._mdstate = 's'
123 self._mdstate = 's'
124 else:
124 else:
125 self._mdstate = 'u'
125 self._mdstate = 'u'
126 shutil.rmtree(self._repo.vfs.join('merge'), True)
126 shutil.rmtree(self._repo.vfs.join('merge'), True)
127 self._results = {}
127 self._results = {}
128 self._dirty = False
128 self._dirty = False
129
129
130 def _read(self):
130 def _read(self):
131 """Analyse each record content to restore a serialized state from disk
131 """Analyse each record content to restore a serialized state from disk
132
132
133 This function process "record" entry produced by the de-serialization
133 This function process "record" entry produced by the de-serialization
134 of on disk file.
134 of on disk file.
135 """
135 """
136 self._state = {}
136 self._state = {}
137 self._stateextras = {}
137 self._stateextras = {}
138 self._local = None
138 self._local = None
139 self._other = None
139 self._other = None
140 for var in ('localctx', 'otherctx'):
140 for var in ('localctx', 'otherctx'):
141 if var in vars(self):
141 if var in vars(self):
142 delattr(self, var)
142 delattr(self, var)
143 self._readmergedriver = None
143 self._readmergedriver = None
144 self._mdstate = 's'
144 self._mdstate = 's'
145 unsupported = set()
145 unsupported = set()
146 records = self._readrecords()
146 records = self._readrecords()
147 for rtype, record in records:
147 for rtype, record in records:
148 if rtype == 'L':
148 if rtype == 'L':
149 self._local = bin(record)
149 self._local = bin(record)
150 elif rtype == 'O':
150 elif rtype == 'O':
151 self._other = bin(record)
151 self._other = bin(record)
152 elif rtype == 'm':
152 elif rtype == 'm':
153 bits = record.split('\0', 1)
153 bits = record.split('\0', 1)
154 mdstate = bits[1]
154 mdstate = bits[1]
155 if len(mdstate) != 1 or mdstate not in 'ums':
155 if len(mdstate) != 1 or mdstate not in 'ums':
156 # the merge driver should be idempotent, so just rerun it
156 # the merge driver should be idempotent, so just rerun it
157 mdstate = 'u'
157 mdstate = 'u'
158
158
159 self._readmergedriver = bits[0]
159 self._readmergedriver = bits[0]
160 self._mdstate = mdstate
160 self._mdstate = mdstate
161 elif rtype in 'FDC':
161 elif rtype in 'FDC':
162 bits = record.split('\0')
162 bits = record.split('\0')
163 self._state[bits[0]] = bits[1:]
163 self._state[bits[0]] = bits[1:]
164 elif rtype == 'f':
164 elif rtype == 'f':
165 filename, rawextras = record.split('\0', 1)
165 filename, rawextras = record.split('\0', 1)
166 extraparts = rawextras.split('\0')
166 extraparts = rawextras.split('\0')
167 extras = {}
167 extras = {}
168 i = 0
168 i = 0
169 while i < len(extraparts):
169 while i < len(extraparts):
170 extras[extraparts[i]] = extraparts[i + 1]
170 extras[extraparts[i]] = extraparts[i + 1]
171 i += 2
171 i += 2
172
172
173 self._stateextras[filename] = extras
173 self._stateextras[filename] = extras
174 elif rtype == 'l':
174 elif rtype == 'l':
175 labels = record.split('\0', 2)
175 labels = record.split('\0', 2)
176 self._labels = [l for l in labels if len(l) > 0]
176 self._labels = [l for l in labels if len(l) > 0]
177 elif not rtype.islower():
177 elif not rtype.islower():
178 unsupported.add(rtype)
178 unsupported.add(rtype)
179 self._results = {}
179 self._results = {}
180 self._dirty = False
180 self._dirty = False
181
181
182 if unsupported:
182 if unsupported:
183 raise error.UnsupportedMergeRecords(unsupported)
183 raise error.UnsupportedMergeRecords(unsupported)
184
184
185 def _readrecords(self):
185 def _readrecords(self):
186 """Read merge state from disk and return a list of record (TYPE, data)
186 """Read merge state from disk and return a list of record (TYPE, data)
187
187
188 We read data from both v1 and v2 files and decide which one to use.
188 We read data from both v1 and v2 files and decide which one to use.
189
189
190 V1 has been used by version prior to 2.9.1 and contains less data than
190 V1 has been used by version prior to 2.9.1 and contains less data than
191 v2. We read both versions and check if no data in v2 contradicts
191 v2. We read both versions and check if no data in v2 contradicts
192 v1. If there is not contradiction we can safely assume that both v1
192 v1. If there is not contradiction we can safely assume that both v1
193 and v2 were written at the same time and use the extract data in v2. If
193 and v2 were written at the same time and use the extract data in v2. If
194 there is contradiction we ignore v2 content as we assume an old version
194 there is contradiction we ignore v2 content as we assume an old version
195 of Mercurial has overwritten the mergestate file and left an old v2
195 of Mercurial has overwritten the mergestate file and left an old v2
196 file around.
196 file around.
197
197
198 returns list of record [(TYPE, data), ...]"""
198 returns list of record [(TYPE, data), ...]"""
199 v1records = self._readrecordsv1()
199 v1records = self._readrecordsv1()
200 v2records = self._readrecordsv2()
200 v2records = self._readrecordsv2()
201 if self._v1v2match(v1records, v2records):
201 if self._v1v2match(v1records, v2records):
202 return v2records
202 return v2records
203 else:
203 else:
204 # v1 file is newer than v2 file, use it
204 # v1 file is newer than v2 file, use it
205 # we have to infer the "other" changeset of the merge
205 # we have to infer the "other" changeset of the merge
206 # we cannot do better than that with v1 of the format
206 # we cannot do better than that with v1 of the format
207 mctx = self._repo[None].parents()[-1]
207 mctx = self._repo[None].parents()[-1]
208 v1records.append(('O', mctx.hex()))
208 v1records.append(('O', mctx.hex()))
209 # add place holder "other" file node information
209 # add place holder "other" file node information
210 # nobody is using it yet so we do no need to fetch the data
210 # nobody is using it yet so we do no need to fetch the data
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
212 for idx, r in enumerate(v1records):
212 for idx, r in enumerate(v1records):
213 if r[0] == 'F':
213 if r[0] == 'F':
214 bits = r[1].split('\0')
214 bits = r[1].split('\0')
215 bits.insert(-2, '')
215 bits.insert(-2, '')
216 v1records[idx] = (r[0], '\0'.join(bits))
216 v1records[idx] = (r[0], '\0'.join(bits))
217 return v1records
217 return v1records
218
218
219 def _v1v2match(self, v1records, v2records):
219 def _v1v2match(self, v1records, v2records):
220 oldv2 = set() # old format version of v2 record
220 oldv2 = set() # old format version of v2 record
221 for rec in v2records:
221 for rec in v2records:
222 if rec[0] == 'L':
222 if rec[0] == 'L':
223 oldv2.add(rec)
223 oldv2.add(rec)
224 elif rec[0] == 'F':
224 elif rec[0] == 'F':
225 # drop the onode data (not contained in v1)
225 # drop the onode data (not contained in v1)
226 oldv2.add(('F', _droponode(rec[1])))
226 oldv2.add(('F', _droponode(rec[1])))
227 for rec in v1records:
227 for rec in v1records:
228 if rec not in oldv2:
228 if rec not in oldv2:
229 return False
229 return False
230 else:
230 else:
231 return True
231 return True
232
232
233 def _readrecordsv1(self):
233 def _readrecordsv1(self):
234 """read on disk merge state for version 1 file
234 """read on disk merge state for version 1 file
235
235
236 returns list of record [(TYPE, data), ...]
236 returns list of record [(TYPE, data), ...]
237
237
238 Note: the "F" data from this file are one entry short
238 Note: the "F" data from this file are one entry short
239 (no "other file node" entry)
239 (no "other file node" entry)
240 """
240 """
241 records = []
241 records = []
242 try:
242 try:
243 f = self._repo.vfs(self.statepathv1)
243 f = self._repo.vfs(self.statepathv1)
244 for i, l in enumerate(f):
244 for i, l in enumerate(f):
245 if i == 0:
245 if i == 0:
246 records.append(('L', l[:-1]))
246 records.append(('L', l[:-1]))
247 else:
247 else:
248 records.append(('F', l[:-1]))
248 records.append(('F', l[:-1]))
249 f.close()
249 f.close()
250 except IOError as err:
250 except IOError as err:
251 if err.errno != errno.ENOENT:
251 if err.errno != errno.ENOENT:
252 raise
252 raise
253 return records
253 return records
254
254
255 def _readrecordsv2(self):
255 def _readrecordsv2(self):
256 """read on disk merge state for version 2 file
256 """read on disk merge state for version 2 file
257
257
258 This format is a list of arbitrary records of the form:
258 This format is a list of arbitrary records of the form:
259
259
260 [type][length][content]
260 [type][length][content]
261
261
262 `type` is a single character, `length` is a 4 byte integer, and
262 `type` is a single character, `length` is a 4 byte integer, and
263 `content` is an arbitrary byte sequence of length `length`.
263 `content` is an arbitrary byte sequence of length `length`.
264
264
265 Mercurial versions prior to 3.7 have a bug where if there are
265 Mercurial versions prior to 3.7 have a bug where if there are
266 unsupported mandatory merge records, attempting to clear out the merge
266 unsupported mandatory merge records, attempting to clear out the merge
267 state with hg update --clean or similar aborts. The 't' record type
267 state with hg update --clean or similar aborts. The 't' record type
268 works around that by writing out what those versions treat as an
268 works around that by writing out what those versions treat as an
269 advisory record, but later versions interpret as special: the first
269 advisory record, but later versions interpret as special: the first
270 character is the 'real' record type and everything onwards is the data.
270 character is the 'real' record type and everything onwards is the data.
271
271
272 Returns list of records [(TYPE, data), ...]."""
272 Returns list of records [(TYPE, data), ...]."""
273 records = []
273 records = []
274 try:
274 try:
275 f = self._repo.vfs(self.statepathv2)
275 f = self._repo.vfs(self.statepathv2)
276 data = f.read()
276 data = f.read()
277 off = 0
277 off = 0
278 end = len(data)
278 end = len(data)
279 while off < end:
279 while off < end:
280 rtype = data[off]
280 rtype = data[off]
281 off += 1
281 off += 1
282 length = _unpack('>I', data[off:(off + 4)])[0]
282 length = _unpack('>I', data[off:(off + 4)])[0]
283 off += 4
283 off += 4
284 record = data[off:(off + length)]
284 record = data[off:(off + length)]
285 off += length
285 off += length
286 if rtype == 't':
286 if rtype == 't':
287 rtype, record = record[0], record[1:]
287 rtype, record = record[0], record[1:]
288 records.append((rtype, record))
288 records.append((rtype, record))
289 f.close()
289 f.close()
290 except IOError as err:
290 except IOError as err:
291 if err.errno != errno.ENOENT:
291 if err.errno != errno.ENOENT:
292 raise
292 raise
293 return records
293 return records
294
294
295 @util.propertycache
295 @util.propertycache
296 def mergedriver(self):
296 def mergedriver(self):
297 # protect against the following:
297 # protect against the following:
298 # - A configures a malicious merge driver in their hgrc, then
298 # - A configures a malicious merge driver in their hgrc, then
299 # pauses the merge
299 # pauses the merge
300 # - A edits their hgrc to remove references to the merge driver
300 # - A edits their hgrc to remove references to the merge driver
301 # - A gives a copy of their entire repo, including .hg, to B
301 # - A gives a copy of their entire repo, including .hg, to B
302 # - B inspects .hgrc and finds it to be clean
302 # - B inspects .hgrc and finds it to be clean
303 # - B then continues the merge and the malicious merge driver
303 # - B then continues the merge and the malicious merge driver
304 # gets invoked
304 # gets invoked
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
306 if (self._readmergedriver is not None
306 if (self._readmergedriver is not None
307 and self._readmergedriver != configmergedriver):
307 and self._readmergedriver != configmergedriver):
308 raise error.ConfigError(
308 raise error.ConfigError(
309 _("merge driver changed since merge started"),
309 _("merge driver changed since merge started"),
310 hint=_("revert merge driver change or abort merge"))
310 hint=_("revert merge driver change or abort merge"))
311
311
312 return configmergedriver
312 return configmergedriver
313
313
314 @util.propertycache
314 @util.propertycache
315 def localctx(self):
315 def localctx(self):
316 if self._local is None:
316 if self._local is None:
317 msg = "localctx accessed but self._local isn't set"
317 msg = "localctx accessed but self._local isn't set"
318 raise error.ProgrammingError(msg)
318 raise error.ProgrammingError(msg)
319 return self._repo[self._local]
319 return self._repo[self._local]
320
320
321 @util.propertycache
321 @util.propertycache
322 def otherctx(self):
322 def otherctx(self):
323 if self._other is None:
323 if self._other is None:
324 msg = "otherctx accessed but self._other isn't set"
324 msg = "otherctx accessed but self._other isn't set"
325 raise error.ProgrammingError(msg)
325 raise error.ProgrammingError(msg)
326 return self._repo[self._other]
326 return self._repo[self._other]
327
327
328 def active(self):
328 def active(self):
329 """Whether mergestate is active.
329 """Whether mergestate is active.
330
330
331 Returns True if there appears to be mergestate. This is a rough proxy
331 Returns True if there appears to be mergestate. This is a rough proxy
332 for "is a merge in progress."
332 for "is a merge in progress."
333 """
333 """
334 # Check local variables before looking at filesystem for performance
334 # Check local variables before looking at filesystem for performance
335 # reasons.
335 # reasons.
336 return bool(self._local) or bool(self._state) or \
336 return bool(self._local) or bool(self._state) or \
337 self._repo.vfs.exists(self.statepathv1) or \
337 self._repo.vfs.exists(self.statepathv1) or \
338 self._repo.vfs.exists(self.statepathv2)
338 self._repo.vfs.exists(self.statepathv2)
339
339
340 def commit(self):
340 def commit(self):
341 """Write current state on disk (if necessary)"""
341 """Write current state on disk (if necessary)"""
342 if self._dirty:
342 if self._dirty:
343 records = self._makerecords()
343 records = self._makerecords()
344 self._writerecords(records)
344 self._writerecords(records)
345 self._dirty = False
345 self._dirty = False
346
346
347 def _makerecords(self):
347 def _makerecords(self):
348 records = []
348 records = []
349 records.append(('L', hex(self._local)))
349 records.append(('L', hex(self._local)))
350 records.append(('O', hex(self._other)))
350 records.append(('O', hex(self._other)))
351 if self.mergedriver:
351 if self.mergedriver:
352 records.append(('m', '\0'.join([
352 records.append(('m', '\0'.join([
353 self.mergedriver, self._mdstate])))
353 self.mergedriver, self._mdstate])))
354 for d, v in self._state.iteritems():
354 for d, v in self._state.iteritems():
355 if v[0] == 'd':
355 if v[0] == 'd':
356 records.append(('D', '\0'.join([d] + v)))
356 records.append(('D', '\0'.join([d] + v)))
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
358 # older versions of Mercurial
358 # older versions of Mercurial
359 elif v[1] == nullhex or v[6] == nullhex:
359 elif v[1] == nullhex or v[6] == nullhex:
360 records.append(('C', '\0'.join([d] + v)))
360 records.append(('C', '\0'.join([d] + v)))
361 else:
361 else:
362 records.append(('F', '\0'.join([d] + v)))
362 records.append(('F', '\0'.join([d] + v)))
363 for filename, extras in sorted(self._stateextras.iteritems()):
363 for filename, extras in sorted(self._stateextras.iteritems()):
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
365 extras.iteritems())
365 extras.iteritems())
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
367 if self._labels is not None:
367 if self._labels is not None:
368 labels = '\0'.join(self._labels)
368 labels = '\0'.join(self._labels)
369 records.append(('l', labels))
369 records.append(('l', labels))
370 return records
370 return records
371
371
372 def _writerecords(self, records):
372 def _writerecords(self, records):
373 """Write current state on disk (both v1 and v2)"""
373 """Write current state on disk (both v1 and v2)"""
374 self._writerecordsv1(records)
374 self._writerecordsv1(records)
375 self._writerecordsv2(records)
375 self._writerecordsv2(records)
376
376
377 def _writerecordsv1(self, records):
377 def _writerecordsv1(self, records):
378 """Write current state on disk in a version 1 file"""
378 """Write current state on disk in a version 1 file"""
379 f = self._repo.vfs(self.statepathv1, 'w')
379 f = self._repo.vfs(self.statepathv1, 'w')
380 irecords = iter(records)
380 irecords = iter(records)
381 lrecords = next(irecords)
381 lrecords = next(irecords)
382 assert lrecords[0] == 'L'
382 assert lrecords[0] == 'L'
383 f.write(hex(self._local) + '\n')
383 f.write(hex(self._local) + '\n')
384 for rtype, data in irecords:
384 for rtype, data in irecords:
385 if rtype == 'F':
385 if rtype == 'F':
386 f.write('%s\n' % _droponode(data))
386 f.write('%s\n' % _droponode(data))
387 f.close()
387 f.close()
388
388
389 def _writerecordsv2(self, records):
389 def _writerecordsv2(self, records):
390 """Write current state on disk in a version 2 file
390 """Write current state on disk in a version 2 file
391
391
392 See the docstring for _readrecordsv2 for why we use 't'."""
392 See the docstring for _readrecordsv2 for why we use 't'."""
393 # these are the records that all version 2 clients can read
393 # these are the records that all version 2 clients can read
394 whitelist = 'LOF'
394 whitelist = 'LOF'
395 f = self._repo.vfs(self.statepathv2, 'w')
395 f = self._repo.vfs(self.statepathv2, 'w')
396 for key, data in records:
396 for key, data in records:
397 assert len(key) == 1
397 assert len(key) == 1
398 if key not in whitelist:
398 if key not in whitelist:
399 key, data = 't', '%s%s' % (key, data)
399 key, data = 't', '%s%s' % (key, data)
400 format = '>sI%is' % len(data)
400 format = '>sI%is' % len(data)
401 f.write(_pack(format, key, len(data), data))
401 f.write(_pack(format, key, len(data), data))
402 f.close()
402 f.close()
403
403
404 def add(self, fcl, fco, fca, fd):
404 def add(self, fcl, fco, fca, fd):
405 """add a new (potentially?) conflicting file the merge state
405 """add a new (potentially?) conflicting file the merge state
406 fcl: file context for local,
406 fcl: file context for local,
407 fco: file context for remote,
407 fco: file context for remote,
408 fca: file context for ancestors,
408 fca: file context for ancestors,
409 fd: file path of the resulting merge.
409 fd: file path of the resulting merge.
410
410
411 note: also write the local version to the `.hg/merge` directory.
411 note: also write the local version to the `.hg/merge` directory.
412 """
412 """
413 if fcl.isabsent():
413 if fcl.isabsent():
414 hash = nullhex
414 hash = nullhex
415 else:
415 else:
416 hash = hex(hashlib.sha1(fcl.path()).digest())
416 hash = hex(hashlib.sha1(fcl.path()).digest())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
418 self._state[fd] = ['u', hash, fcl.path(),
418 self._state[fd] = ['u', hash, fcl.path(),
419 fca.path(), hex(fca.filenode()),
419 fca.path(), hex(fca.filenode()),
420 fco.path(), hex(fco.filenode()),
420 fco.path(), hex(fco.filenode()),
421 fcl.flags()]
421 fcl.flags()]
422 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
422 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
423 self._dirty = True
423 self._dirty = True
424
424
425 def __contains__(self, dfile):
425 def __contains__(self, dfile):
426 return dfile in self._state
426 return dfile in self._state
427
427
428 def __getitem__(self, dfile):
428 def __getitem__(self, dfile):
429 return self._state[dfile][0]
429 return self._state[dfile][0]
430
430
431 def __iter__(self):
431 def __iter__(self):
432 return iter(sorted(self._state))
432 return iter(sorted(self._state))
433
433
434 def files(self):
434 def files(self):
435 return self._state.keys()
435 return self._state.keys()
436
436
437 def mark(self, dfile, state):
437 def mark(self, dfile, state):
438 self._state[dfile][0] = state
438 self._state[dfile][0] = state
439 self._dirty = True
439 self._dirty = True
440
440
441 def mdstate(self):
441 def mdstate(self):
442 return self._mdstate
442 return self._mdstate
443
443
444 def unresolved(self):
444 def unresolved(self):
445 """Obtain the paths of unresolved files."""
445 """Obtain the paths of unresolved files."""
446
446
447 for f, entry in self._state.iteritems():
447 for f, entry in self._state.iteritems():
448 if entry[0] == 'u':
448 if entry[0] == 'u':
449 yield f
449 yield f
450
450
451 def driverresolved(self):
451 def driverresolved(self):
452 """Obtain the paths of driver-resolved files."""
452 """Obtain the paths of driver-resolved files."""
453
453
454 for f, entry in self._state.items():
454 for f, entry in self._state.items():
455 if entry[0] == 'd':
455 if entry[0] == 'd':
456 yield f
456 yield f
457
457
458 def extras(self, filename):
458 def extras(self, filename):
459 return self._stateextras.setdefault(filename, {})
459 return self._stateextras.setdefault(filename, {})
460
460
461 def _resolve(self, preresolve, dfile, wctx):
461 def _resolve(self, preresolve, dfile, wctx):
462 """rerun merge process for file path `dfile`"""
462 """rerun merge process for file path `dfile`"""
463 if self[dfile] in 'rd':
463 if self[dfile] in 'rd':
464 return True, 0
464 return True, 0
465 stateentry = self._state[dfile]
465 stateentry = self._state[dfile]
466 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
466 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
467 octx = self._repo[self._other]
467 octx = self._repo[self._other]
468 extras = self.extras(dfile)
468 extras = self.extras(dfile)
469 anccommitnode = extras.get('ancestorlinknode')
469 anccommitnode = extras.get('ancestorlinknode')
470 if anccommitnode:
470 if anccommitnode:
471 actx = self._repo[anccommitnode]
471 actx = self._repo[anccommitnode]
472 else:
472 else:
473 actx = None
473 actx = None
474 fcd = self._filectxorabsent(hash, wctx, dfile)
474 fcd = self._filectxorabsent(hash, wctx, dfile)
475 fco = self._filectxorabsent(onode, octx, ofile)
475 fco = self._filectxorabsent(onode, octx, ofile)
476 # TODO: move this to filectxorabsent
476 # TODO: move this to filectxorabsent
477 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
477 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
478 # "premerge" x flags
478 # "premerge" x flags
479 flo = fco.flags()
479 flo = fco.flags()
480 fla = fca.flags()
480 fla = fca.flags()
481 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
481 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
482 if fca.node() == nullid and flags != flo:
482 if fca.node() == nullid and flags != flo:
483 if preresolve:
483 if preresolve:
484 self._repo.ui.warn(
484 self._repo.ui.warn(
485 _('warning: cannot merge flags for %s '
485 _('warning: cannot merge flags for %s '
486 'without common ancestor - keeping local flags\n')
486 'without common ancestor - keeping local flags\n')
487 % afile)
487 % afile)
488 elif flags == fla:
488 elif flags == fla:
489 flags = flo
489 flags = flo
490 if preresolve:
490 if preresolve:
491 # restore local
491 # restore local
492 if hash != nullhex:
492 if hash != nullhex:
493 f = self._repo.vfs('merge/' + hash)
493 f = self._repo.vfs('merge/' + hash)
494 wctx[dfile].write(f.read(), flags)
494 wctx[dfile].write(f.read(), flags)
495 f.close()
495 f.close()
496 else:
496 else:
497 wctx[dfile].remove(ignoremissing=True)
497 wctx[dfile].remove(ignoremissing=True)
498 complete, r, deleted = filemerge.premerge(self._repo, self._local,
498 complete, r, deleted = filemerge.premerge(self._repo, self._local,
499 lfile, fcd, fco, fca,
499 lfile, fcd, fco, fca,
500 labels=self._labels)
500 labels=self._labels)
501 else:
501 else:
502 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
502 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
503 lfile, fcd, fco, fca,
503 lfile, fcd, fco, fca,
504 labels=self._labels)
504 labels=self._labels)
505 if r is None:
505 if r is None:
506 # no real conflict
506 # no real conflict
507 del self._state[dfile]
507 del self._state[dfile]
508 self._stateextras.pop(dfile, None)
508 self._stateextras.pop(dfile, None)
509 self._dirty = True
509 self._dirty = True
510 elif not r:
510 elif not r:
511 self.mark(dfile, 'r')
511 self.mark(dfile, 'r')
512
512
513 if complete:
513 if complete:
514 action = None
514 action = None
515 if deleted:
515 if deleted:
516 if fcd.isabsent():
516 if fcd.isabsent():
517 # dc: local picked. Need to drop if present, which may
517 # dc: local picked. Need to drop if present, which may
518 # happen on re-resolves.
518 # happen on re-resolves.
519 action = 'f'
519 action = 'f'
520 else:
520 else:
521 # cd: remote picked (or otherwise deleted)
521 # cd: remote picked (or otherwise deleted)
522 action = 'r'
522 action = 'r'
523 else:
523 else:
524 if fcd.isabsent(): # dc: remote picked
524 if fcd.isabsent(): # dc: remote picked
525 action = 'g'
525 action = 'g'
526 elif fco.isabsent(): # cd: local picked
526 elif fco.isabsent(): # cd: local picked
527 if dfile in self.localctx:
527 if dfile in self.localctx:
528 action = 'am'
528 action = 'am'
529 else:
529 else:
530 action = 'a'
530 action = 'a'
531 # else: regular merges (no action necessary)
531 # else: regular merges (no action necessary)
532 self._results[dfile] = r, action
532 self._results[dfile] = r, action
533
533
534 return complete, r
534 return complete, r
535
535
536 def _filectxorabsent(self, hexnode, ctx, f):
536 def _filectxorabsent(self, hexnode, ctx, f):
537 if hexnode == nullhex:
537 if hexnode == nullhex:
538 return filemerge.absentfilectx(ctx, f)
538 return filemerge.absentfilectx(ctx, f)
539 else:
539 else:
540 return ctx[f]
540 return ctx[f]
541
541
542 def preresolve(self, dfile, wctx):
542 def preresolve(self, dfile, wctx):
543 """run premerge process for dfile
543 """run premerge process for dfile
544
544
545 Returns whether the merge is complete, and the exit code."""
545 Returns whether the merge is complete, and the exit code."""
546 return self._resolve(True, dfile, wctx)
546 return self._resolve(True, dfile, wctx)
547
547
548 def resolve(self, dfile, wctx):
548 def resolve(self, dfile, wctx):
549 """run merge process (assuming premerge was run) for dfile
549 """run merge process (assuming premerge was run) for dfile
550
550
551 Returns the exit code of the merge."""
551 Returns the exit code of the merge."""
552 return self._resolve(False, dfile, wctx)[1]
552 return self._resolve(False, dfile, wctx)[1]
553
553
554 def counts(self):
554 def counts(self):
555 """return counts for updated, merged and removed files in this
555 """return counts for updated, merged and removed files in this
556 session"""
556 session"""
557 updated, merged, removed = 0, 0, 0
557 updated, merged, removed = 0, 0, 0
558 for r, action in self._results.itervalues():
558 for r, action in self._results.itervalues():
559 if r is None:
559 if r is None:
560 updated += 1
560 updated += 1
561 elif r == 0:
561 elif r == 0:
562 if action == 'r':
562 if action == 'r':
563 removed += 1
563 removed += 1
564 else:
564 else:
565 merged += 1
565 merged += 1
566 return updated, merged, removed
566 return updated, merged, removed
567
567
568 def unresolvedcount(self):
568 def unresolvedcount(self):
569 """get unresolved count for this merge (persistent)"""
569 """get unresolved count for this merge (persistent)"""
570 return len(list(self.unresolved()))
570 return len(list(self.unresolved()))
571
571
572 def actions(self):
572 def actions(self):
573 """return lists of actions to perform on the dirstate"""
573 """return lists of actions to perform on the dirstate"""
574 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
574 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
575 for f, (r, action) in self._results.iteritems():
575 for f, (r, action) in self._results.iteritems():
576 if action is not None:
576 if action is not None:
577 actions[action].append((f, None, "merge result"))
577 actions[action].append((f, None, "merge result"))
578 return actions
578 return actions
579
579
580 def recordactions(self):
580 def recordactions(self):
581 """record remove/add/get actions in the dirstate"""
581 """record remove/add/get actions in the dirstate"""
582 branchmerge = self._repo.dirstate.p2() != nullid
582 branchmerge = self._repo.dirstate.p2() != nullid
583 recordupdates(self._repo, self.actions(), branchmerge)
583 recordupdates(self._repo, self.actions(), branchmerge)
584
584
585 def queueremove(self, f):
585 def queueremove(self, f):
586 """queues a file to be removed from the dirstate
586 """queues a file to be removed from the dirstate
587
587
588 Meant for use by custom merge drivers."""
588 Meant for use by custom merge drivers."""
589 self._results[f] = 0, 'r'
589 self._results[f] = 0, 'r'
590
590
591 def queueadd(self, f):
591 def queueadd(self, f):
592 """queues a file to be added to the dirstate
592 """queues a file to be added to the dirstate
593
593
594 Meant for use by custom merge drivers."""
594 Meant for use by custom merge drivers."""
595 self._results[f] = 0, 'a'
595 self._results[f] = 0, 'a'
596
596
597 def queueget(self, f):
597 def queueget(self, f):
598 """queues a file to be marked modified in the dirstate
598 """queues a file to be marked modified in the dirstate
599
599
600 Meant for use by custom merge drivers."""
600 Meant for use by custom merge drivers."""
601 self._results[f] = 0, 'g'
601 self._results[f] = 0, 'g'
602
602
603 def _getcheckunknownconfig(repo, section, name):
603 def _getcheckunknownconfig(repo, section, name):
604 config = repo.ui.config(section, name, default='abort')
604 config = repo.ui.config(section, name, default='abort')
605 valid = ['abort', 'ignore', 'warn']
605 valid = ['abort', 'ignore', 'warn']
606 if config not in valid:
606 if config not in valid:
607 validstr = ', '.join(["'" + v + "'" for v in valid])
607 validstr = ', '.join(["'" + v + "'" for v in valid])
608 raise error.ConfigError(_("%s.%s not valid "
608 raise error.ConfigError(_("%s.%s not valid "
609 "('%s' is none of %s)")
609 "('%s' is none of %s)")
610 % (section, name, config, validstr))
610 % (section, name, config, validstr))
611 return config
611 return config
612
612
613 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
613 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
614 if f2 is None:
614 if f2 is None:
615 f2 = f
615 f2 = f
616 return (repo.wvfs.audit.check(f)
616 return (repo.wvfs.audit.check(f)
617 and repo.wvfs.isfileorlink(f)
617 and repo.wvfs.isfileorlink(f)
618 and repo.dirstate.normalize(f) not in repo.dirstate
618 and repo.dirstate.normalize(f) not in repo.dirstate
619 and mctx[f2].cmp(wctx[f]))
619 and mctx[f2].cmp(wctx[f]))
620
620
621 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
621 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
622 """
622 """
623 Considers any actions that care about the presence of conflicting unknown
623 Considers any actions that care about the presence of conflicting unknown
624 files. For some actions, the result is to abort; for others, it is to
624 files. For some actions, the result is to abort; for others, it is to
625 choose a different action.
625 choose a different action.
626 """
626 """
627 conflicts = set()
627 conflicts = set()
628 warnconflicts = set()
628 warnconflicts = set()
629 abortconflicts = set()
629 abortconflicts = set()
630 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
630 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
631 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
631 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
632 if not force:
632 if not force:
633 def collectconflicts(conflicts, config):
633 def collectconflicts(conflicts, config):
634 if config == 'abort':
634 if config == 'abort':
635 abortconflicts.update(conflicts)
635 abortconflicts.update(conflicts)
636 elif config == 'warn':
636 elif config == 'warn':
637 warnconflicts.update(conflicts)
637 warnconflicts.update(conflicts)
638
638
639 for f, (m, args, msg) in actions.iteritems():
639 for f, (m, args, msg) in actions.iteritems():
640 if m in ('c', 'dc'):
640 if m in ('c', 'dc'):
641 if _checkunknownfile(repo, wctx, mctx, f):
641 if _checkunknownfile(repo, wctx, mctx, f):
642 conflicts.add(f)
642 conflicts.add(f)
643 elif m == 'dg':
643 elif m == 'dg':
644 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
644 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
645 conflicts.add(f)
645 conflicts.add(f)
646
646
647 ignoredconflicts = set([c for c in conflicts
647 ignoredconflicts = set([c for c in conflicts
648 if repo.dirstate._ignore(c)])
648 if repo.dirstate._ignore(c)])
649 unknownconflicts = conflicts - ignoredconflicts
649 unknownconflicts = conflicts - ignoredconflicts
650 collectconflicts(ignoredconflicts, ignoredconfig)
650 collectconflicts(ignoredconflicts, ignoredconfig)
651 collectconflicts(unknownconflicts, unknownconfig)
651 collectconflicts(unknownconflicts, unknownconfig)
652 else:
652 else:
653 for f, (m, args, msg) in actions.iteritems():
653 for f, (m, args, msg) in actions.iteritems():
654 if m == 'cm':
654 if m == 'cm':
655 fl2, anc = args
655 fl2, anc = args
656 different = _checkunknownfile(repo, wctx, mctx, f)
656 different = _checkunknownfile(repo, wctx, mctx, f)
657 if repo.dirstate._ignore(f):
657 if repo.dirstate._ignore(f):
658 config = ignoredconfig
658 config = ignoredconfig
659 else:
659 else:
660 config = unknownconfig
660 config = unknownconfig
661
661
662 # The behavior when force is True is described by this table:
662 # The behavior when force is True is described by this table:
663 # config different mergeforce | action backup
663 # config different mergeforce | action backup
664 # * n * | get n
664 # * n * | get n
665 # * y y | merge -
665 # * y y | merge -
666 # abort y n | merge - (1)
666 # abort y n | merge - (1)
667 # warn y n | warn + get y
667 # warn y n | warn + get y
668 # ignore y n | get y
668 # ignore y n | get y
669 #
669 #
670 # (1) this is probably the wrong behavior here -- we should
670 # (1) this is probably the wrong behavior here -- we should
671 # probably abort, but some actions like rebases currently
671 # probably abort, but some actions like rebases currently
672 # don't like an abort happening in the middle of
672 # don't like an abort happening in the middle of
673 # merge.update.
673 # merge.update.
674 if not different:
674 if not different:
675 actions[f] = ('g', (fl2, False), "remote created")
675 actions[f] = ('g', (fl2, False), "remote created")
676 elif mergeforce or config == 'abort':
676 elif mergeforce or config == 'abort':
677 actions[f] = ('m', (f, f, None, False, anc),
677 actions[f] = ('m', (f, f, None, False, anc),
678 "remote differs from untracked local")
678 "remote differs from untracked local")
679 elif config == 'abort':
679 elif config == 'abort':
680 abortconflicts.add(f)
680 abortconflicts.add(f)
681 else:
681 else:
682 if config == 'warn':
682 if config == 'warn':
683 warnconflicts.add(f)
683 warnconflicts.add(f)
684 actions[f] = ('g', (fl2, True), "remote created")
684 actions[f] = ('g', (fl2, True), "remote created")
685
685
686 for f in sorted(abortconflicts):
686 for f in sorted(abortconflicts):
687 repo.ui.warn(_("%s: untracked file differs\n") % f)
687 repo.ui.warn(_("%s: untracked file differs\n") % f)
688 if abortconflicts:
688 if abortconflicts:
689 raise error.Abort(_("untracked files in working directory "
689 raise error.Abort(_("untracked files in working directory "
690 "differ from files in requested revision"))
690 "differ from files in requested revision"))
691
691
692 for f in sorted(warnconflicts):
692 for f in sorted(warnconflicts):
693 repo.ui.warn(_("%s: replacing untracked file\n") % f)
693 repo.ui.warn(_("%s: replacing untracked file\n") % f)
694
694
695 for f, (m, args, msg) in actions.iteritems():
695 for f, (m, args, msg) in actions.iteritems():
696 backup = f in conflicts
696 backup = f in conflicts
697 if m == 'c':
697 if m == 'c':
698 flags, = args
698 flags, = args
699 actions[f] = ('g', (flags, backup), msg)
699 actions[f] = ('g', (flags, backup), msg)
700
700
701 def _forgetremoved(wctx, mctx, branchmerge):
701 def _forgetremoved(wctx, mctx, branchmerge):
702 """
702 """
703 Forget removed files
703 Forget removed files
704
704
705 If we're jumping between revisions (as opposed to merging), and if
705 If we're jumping between revisions (as opposed to merging), and if
706 neither the working directory nor the target rev has the file,
706 neither the working directory nor the target rev has the file,
707 then we need to remove it from the dirstate, to prevent the
707 then we need to remove it from the dirstate, to prevent the
708 dirstate from listing the file when it is no longer in the
708 dirstate from listing the file when it is no longer in the
709 manifest.
709 manifest.
710
710
711 If we're merging, and the other revision has removed a file
711 If we're merging, and the other revision has removed a file
712 that is not present in the working directory, we need to mark it
712 that is not present in the working directory, we need to mark it
713 as removed.
713 as removed.
714 """
714 """
715
715
716 actions = {}
716 actions = {}
717 m = 'f'
717 m = 'f'
718 if branchmerge:
718 if branchmerge:
719 m = 'r'
719 m = 'r'
720 for f in wctx.deleted():
720 for f in wctx.deleted():
721 if f not in mctx:
721 if f not in mctx:
722 actions[f] = m, None, "forget deleted"
722 actions[f] = m, None, "forget deleted"
723
723
724 if not branchmerge:
724 if not branchmerge:
725 for f in wctx.removed():
725 for f in wctx.removed():
726 if f not in mctx:
726 if f not in mctx:
727 actions[f] = 'f', None, "forget removed"
727 actions[f] = 'f', None, "forget removed"
728
728
729 return actions
729 return actions
730
730
731 def _checkcollision(repo, wmf, actions):
731 def _checkcollision(repo, wmf, actions):
732 # build provisional merged manifest up
732 # build provisional merged manifest up
733 pmmf = set(wmf)
733 pmmf = set(wmf)
734
734
735 if actions:
735 if actions:
736 # k, dr, e and rd are no-op
736 # k, dr, e and rd are no-op
737 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
737 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
738 for f, args, msg in actions[m]:
738 for f, args, msg in actions[m]:
739 pmmf.add(f)
739 pmmf.add(f)
740 for f, args, msg in actions['r']:
740 for f, args, msg in actions['r']:
741 pmmf.discard(f)
741 pmmf.discard(f)
742 for f, args, msg in actions['dm']:
742 for f, args, msg in actions['dm']:
743 f2, flags = args
743 f2, flags = args
744 pmmf.discard(f2)
744 pmmf.discard(f2)
745 pmmf.add(f)
745 pmmf.add(f)
746 for f, args, msg in actions['dg']:
746 for f, args, msg in actions['dg']:
747 pmmf.add(f)
747 pmmf.add(f)
748 for f, args, msg in actions['m']:
748 for f, args, msg in actions['m']:
749 f1, f2, fa, move, anc = args
749 f1, f2, fa, move, anc = args
750 if move:
750 if move:
751 pmmf.discard(f1)
751 pmmf.discard(f1)
752 pmmf.add(f)
752 pmmf.add(f)
753
753
754 # check case-folding collision in provisional merged manifest
754 # check case-folding collision in provisional merged manifest
755 foldmap = {}
755 foldmap = {}
756 for f in sorted(pmmf):
756 for f in pmmf:
757 fold = util.normcase(f)
757 fold = util.normcase(f)
758 if fold in foldmap:
758 if fold in foldmap:
759 raise error.Abort(_("case-folding collision between %s and %s")
759 raise error.Abort(_("case-folding collision between %s and %s")
760 % (f, foldmap[fold]))
760 % (f, foldmap[fold]))
761 foldmap[fold] = f
761 foldmap[fold] = f
762
762
763 # check case-folding of directories
763 # check case-folding of directories
764 foldprefix = unfoldprefix = lastfull = ''
764 foldprefix = unfoldprefix = lastfull = ''
765 for fold, f in sorted(foldmap.items()):
765 for fold, f in sorted(foldmap.items()):
766 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
766 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
767 # the folded prefix matches but actual casing is different
767 # the folded prefix matches but actual casing is different
768 raise error.Abort(_("case-folding collision between "
768 raise error.Abort(_("case-folding collision between "
769 "%s and directory of %s") % (lastfull, f))
769 "%s and directory of %s") % (lastfull, f))
770 foldprefix = fold + '/'
770 foldprefix = fold + '/'
771 unfoldprefix = f + '/'
771 unfoldprefix = f + '/'
772 lastfull = f
772 lastfull = f
773
773
774 def driverpreprocess(repo, ms, wctx, labels=None):
774 def driverpreprocess(repo, ms, wctx, labels=None):
775 """run the preprocess step of the merge driver, if any
775 """run the preprocess step of the merge driver, if any
776
776
777 This is currently not implemented -- it's an extension point."""
777 This is currently not implemented -- it's an extension point."""
778 return True
778 return True
779
779
780 def driverconclude(repo, ms, wctx, labels=None):
780 def driverconclude(repo, ms, wctx, labels=None):
781 """run the conclude step of the merge driver, if any
781 """run the conclude step of the merge driver, if any
782
782
783 This is currently not implemented -- it's an extension point."""
783 This is currently not implemented -- it's an extension point."""
784 return True
784 return True
785
785
786 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
786 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
787 acceptremote, followcopies, forcefulldiff=False):
787 acceptremote, followcopies, forcefulldiff=False):
788 """
788 """
789 Merge wctx and p2 with ancestor pa and generate merge action list
789 Merge wctx and p2 with ancestor pa and generate merge action list
790
790
791 branchmerge and force are as passed in to update
791 branchmerge and force are as passed in to update
792 matcher = matcher to filter file lists
792 matcher = matcher to filter file lists
793 acceptremote = accept the incoming changes without prompting
793 acceptremote = accept the incoming changes without prompting
794 """
794 """
795 if matcher is not None and matcher.always():
795 if matcher is not None and matcher.always():
796 matcher = None
796 matcher = None
797
797
798 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
798 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
799
799
800 # manifests fetched in order are going to be faster, so prime the caches
800 # manifests fetched in order are going to be faster, so prime the caches
801 [x.manifest() for x in
801 [x.manifest() for x in
802 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
802 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
803
803
804 if followcopies:
804 if followcopies:
805 ret = copies.mergecopies(repo, wctx, p2, pa)
805 ret = copies.mergecopies(repo, wctx, p2, pa)
806 copy, movewithdir, diverge, renamedelete, dirmove = ret
806 copy, movewithdir, diverge, renamedelete, dirmove = ret
807
807
808 boolbm = pycompat.bytestr(bool(branchmerge))
808 boolbm = pycompat.bytestr(bool(branchmerge))
809 boolf = pycompat.bytestr(bool(force))
809 boolf = pycompat.bytestr(bool(force))
810 boolm = pycompat.bytestr(bool(matcher))
810 boolm = pycompat.bytestr(bool(matcher))
811 repo.ui.note(_("resolving manifests\n"))
811 repo.ui.note(_("resolving manifests\n"))
812 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
812 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
813 % (boolbm, boolf, boolm))
813 % (boolbm, boolf, boolm))
814 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
814 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
815
815
816 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
816 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
817 copied = set(copy.values())
817 copied = set(copy.values())
818 copied.update(movewithdir.values())
818 copied.update(movewithdir.values())
819
819
820 if '.hgsubstate' in m1:
820 if '.hgsubstate' in m1:
821 # check whether sub state is modified
821 # check whether sub state is modified
822 if any(wctx.sub(s).dirty() for s in wctx.substate):
822 if any(wctx.sub(s).dirty() for s in wctx.substate):
823 m1['.hgsubstate'] = modifiednodeid
823 m1['.hgsubstate'] = modifiednodeid
824
824
825 # Don't use m2-vs-ma optimization if:
825 # Don't use m2-vs-ma optimization if:
826 # - ma is the same as m1 or m2, which we're just going to diff again later
826 # - ma is the same as m1 or m2, which we're just going to diff again later
827 # - The caller specifically asks for a full diff, which is useful during bid
827 # - The caller specifically asks for a full diff, which is useful during bid
828 # merge.
828 # merge.
829 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
829 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
830 # Identify which files are relevant to the merge, so we can limit the
830 # Identify which files are relevant to the merge, so we can limit the
831 # total m1-vs-m2 diff to just those files. This has significant
831 # total m1-vs-m2 diff to just those files. This has significant
832 # performance benefits in large repositories.
832 # performance benefits in large repositories.
833 relevantfiles = set(ma.diff(m2).keys())
833 relevantfiles = set(ma.diff(m2).keys())
834
834
835 # For copied and moved files, we need to add the source file too.
835 # For copied and moved files, we need to add the source file too.
836 for copykey, copyvalue in copy.iteritems():
836 for copykey, copyvalue in copy.iteritems():
837 if copyvalue in relevantfiles:
837 if copyvalue in relevantfiles:
838 relevantfiles.add(copykey)
838 relevantfiles.add(copykey)
839 for movedirkey in movewithdir:
839 for movedirkey in movewithdir:
840 relevantfiles.add(movedirkey)
840 relevantfiles.add(movedirkey)
841 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
841 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
842 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
842 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
843
843
844 diff = m1.diff(m2, match=matcher)
844 diff = m1.diff(m2, match=matcher)
845
845
846 if matcher is None:
846 if matcher is None:
847 matcher = matchmod.always('', '')
847 matcher = matchmod.always('', '')
848
848
849 actions = {}
849 actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                            "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete

def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
       remained the same."""

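    # For instance (illustrative): if the remote side deleted a file that the
    # local side touched without changing its content, the 'cd' prompt from
    # manifestmerge is downgraded to a plain remove below; the symmetric 'dc'
    # case simply keeps the local deletion.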
    for f, (m, args, msg) in actions.items():
        if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
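        # Illustrative shape of what gets collected below (filenames and
        # flags are made up):
        #   fbids = {'foo': {'g': [('g', (fl, False), "remote is newer")],
        #                    'k': [('k', (), "remote unchanged")]}}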
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete

def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    try:
        cwd = pycompat.getcwd()
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        cwd = None
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
    if cwd:
        # cwd was present before we started to remove files
        # let's check if it is present after we removed them
        try:
            pycompat.getcwd()
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # Print a warning if cwd was deleted
            repo.ui.warn(_("current directory was removed\n"
                           "(consider changing to repo root: %s)\n") %
                         repo.root)

def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise

            if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
                repo.wvfs.removedirs(f)
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        wctx[f].audit()
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first. The
    -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |   (1)
     y   *   y   *    *     *     *     |   (1)
     y   *   *   y    *     *     *     |   (1)
     *   y   y   *    *     *     *     |   (1)
     *   y   *   y    *     *     *     |   (1)
     *   *   y   y    *     *     *     |   (1)
     *   *   *   *    *     n     n     |    x
     *   *   *   *    n     *     *     |   ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |   (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |   merge if no conflict
     n   y   n   n    y     *     *     |   discard
     y   n   n   n    y     *     *     |   (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))
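        # Illustrative shapes of the two representations (filename made up):
        #   actionbyfile = {'a/x.txt': ('g', ('', False), "remote is newer")}
        #   actions      = {'g': [('a/x.txt', ('', False), "remote is newer")],
        #                   'r': [], 'm': [], ...}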
1671
1671
1672 if not util.fscasesensitive(repo.path):
1672 if not util.fscasesensitive(repo.path):
1673 # check collision between files only in p2 for clean update
1673 # check collision between files only in p2 for clean update
1674 if (not branchmerge and
1674 if (not branchmerge and
1675 (force or not wc.dirty(missing=True, branch=False))):
1675 (force or not wc.dirty(missing=True, branch=False))):
1676 _checkcollision(repo, p2.manifest(), None)
1676 _checkcollision(repo, p2.manifest(), None)
1677 else:
1677 else:
1678 _checkcollision(repo, wc.manifest(), actions)
1678 _checkcollision(repo, wc.manifest(), actions)
1679
1679
1680 # divergent renames
1680 # divergent renames
1681 for f, fl in sorted(diverge.iteritems()):
1681 for f, fl in sorted(diverge.iteritems()):
1682 repo.ui.warn(_("note: possible conflict - %s was renamed "
1682 repo.ui.warn(_("note: possible conflict - %s was renamed "
1683 "multiple times to:\n") % f)
1683 "multiple times to:\n") % f)
1684 for nf in fl:
1684 for nf in fl:
1685 repo.ui.warn(" %s\n" % nf)
1685 repo.ui.warn(" %s\n" % nf)
1686
1686
1687 # rename and delete
1687 # rename and delete
1688 for f, fl in sorted(renamedelete.iteritems()):
1688 for f, fl in sorted(renamedelete.iteritems()):
1689 repo.ui.warn(_("note: possible conflict - %s was deleted "
1689 repo.ui.warn(_("note: possible conflict - %s was deleted "
1690 "and renamed to:\n") % f)
1690 "and renamed to:\n") % f)
1691 for nf in fl:
1691 for nf in fl:
1692 repo.ui.warn(" %s\n" % nf)
1692 repo.ui.warn(" %s\n" % nf)
1693
1693
1694 ### apply phase
1694 ### apply phase
1695 if not branchmerge: # just jump to the new rev
1695 if not branchmerge: # just jump to the new rev
1696 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1696 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1697 if not partial:
1697 if not partial:
1698 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1698 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1699 # note that we're in the middle of an update
1699 # note that we're in the middle of an update
1700 repo.vfs.write('updatestate', p2.hex())
1700 repo.vfs.write('updatestate', p2.hex())
1701
1701
1702 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1702 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1703
1703
1704 if not partial:
1704 if not partial:
1705 with repo.dirstate.parentchange():
1705 with repo.dirstate.parentchange():
1706 repo.setparents(fp1, fp2)
1706 repo.setparents(fp1, fp2)
1707 recordupdates(repo, actions, branchmerge)
1707 recordupdates(repo, actions, branchmerge)
1708 # update completed, clear state
1708 # update completed, clear state
1709 util.unlink(repo.vfs.join('updatestate'))
1709 util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary includes
        # (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

        if not partial:
            repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
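
# Editor's note (hypothetical usage sketch, not part of the original module):
# a caller along the lines of the 'hg graft' command might drive this as
#
#     from mercurial import merge as mergemod
#     stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'graft'])
#
# i.e. each grafted changeset is merged against its first parent as the base,
# with 'local' and 'graft' used as the conflict-marker labels.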