merge: remove calls to flushall()...
Phil Cohen
r35284:46510597 default
@@ -1,2065 +1,2053 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    extensions,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

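# Illustrative example (editor's note, not part of merge.py): _droponode
# strips the next-to-last NUL-separated field of a v2-style "F" record --
# the "other file node" entry that the v1 merge-state format does not
# store. For instance (hypothetical field values):
#   _droponode('u\0hash\0local\0anc\0anode\0other\0onode\0flags')
#   -> 'u\0hash\0local\0anc\0anode\0other\0flags'
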
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
            self._repo.vfs.exists(self.statepathv1) or \
            self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == 'd':
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(('D', '\0'.join([filename] + v)))
            elif v[0] in ('pu', 'pr'):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(('P', '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(('C', '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append(('F', '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

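# ---------------------------------------------------------------------
# Editor's note: the block below is an illustrative sketch, not part of
# merge.py. It shows how one v2 merge-state record of the form
# [type][length][content] (see _readrecordsv2/_writerecordsv2 above) could
# be encoded and decoded using the module's _pack/_unpack struct aliases.
# The _example* names are hypothetical helpers used only for this sketch.

def _examplepackrecord(rtype, data):
    # 1-byte record type, 4-byte big-endian length, then the raw payload,
    # mirroring the '>sI%is' format string used by _writerecordsv2
    return _pack('>sI%is' % len(data), rtype, len(data), data)

def _exampleunpackrecord(buf, off=0):
    # mirror of the read loop in _readrecordsv2: type, length, payload
    rtype = buf[off]
    length = _unpack('>I', buf[off + 1:off + 5])[0]
    record = buf[off + 5:off + 5 + length]
    if rtype == 't':
        # 't' wraps a record that old clients treat as advisory; the real
        # record type is the first payload byte (see _readrecordsv2)
        rtype, record = record[0], record[1:]
    # also return the offset of the next record in the buffer
    return rtype, record, off + 5 + length
# ---------------------------------------------------------------------
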
def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
        and repo.wvfs.isfileorlink(f)
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f2].cmp(wctx[f]))

class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, f):
        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
                    if relf not in repo.dirstate:
                        return f
        return None

def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

887 def _filesindirs(repo, manifest, dirs):
887 def _filesindirs(repo, manifest, dirs):
888 """
888 """
889 Generator that yields pairs of all the files in the manifest that are found
889 Generator that yields pairs of all the files in the manifest that are found
890 inside the directories listed in dirs, and which directory they are found
890 inside the directories listed in dirs, and which directory they are found
891 in.
891 in.
892 """
892 """
893 for f in manifest:
893 for f in manifest:
894 for p in util.finddirs(f):
894 for p in util.finddirs(f):
895 if p in dirs:
895 if p in dirs:
896 yield f, p
896 yield f, p
897 break
897 break
898
898
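# Illustrative sketch (not part of merge.py): what _filesindirs yields, with a
# simple parent-prefix walk standing in for util.finddirs.
def _finddirs_sketch(path):
    while '/' in path:
        path = path.rsplit('/', 1)[0]
        yield path

def _filesindirs_sketch(manifest, dirs):
    for f in manifest:
        for p in _finddirs_sketch(f):
            if p in dirs:
                yield f, p
                break

# list(_filesindirs_sketch(['a/b/c.txt', 'd.txt'], {'a/b'}))
# -> [('a/b/c.txt', 'a/b')]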
899 def checkpathconflicts(repo, wctx, mctx, actions):
899 def checkpathconflicts(repo, wctx, mctx, actions):
900 """
900 """
901 Check if any actions introduce path conflicts in the repository, updating
901 Check if any actions introduce path conflicts in the repository, updating
902 actions to record or handle the path conflict accordingly.
902 actions to record or handle the path conflict accordingly.
903 """
903 """
904 mf = wctx.manifest()
904 mf = wctx.manifest()
905
905
906 # The set of local files that conflict with a remote directory.
906 # The set of local files that conflict with a remote directory.
907 localconflicts = set()
907 localconflicts = set()
908
908
909 # The set of directories that conflict with a remote file, and so may cause
909 # The set of directories that conflict with a remote file, and so may cause
910 # conflicts if they still contain any files after the merge.
910 # conflicts if they still contain any files after the merge.
911 remoteconflicts = set()
911 remoteconflicts = set()
912
912
913 # The set of directories that appear as both a file and a directory in the
913 # The set of directories that appear as both a file and a directory in the
914 # remote manifest. These indicate an invalid remote manifest, which
914 # remote manifest. These indicate an invalid remote manifest, which
915 # can't be updated to cleanly.
915 # can't be updated to cleanly.
916 invalidconflicts = set()
916 invalidconflicts = set()
917
917
918 # The set of directories that contain files that are being created.
918 # The set of directories that contain files that are being created.
919 createdfiledirs = set()
919 createdfiledirs = set()
920
920
921 # The set of files deleted by all the actions.
921 # The set of files deleted by all the actions.
922 deletedfiles = set()
922 deletedfiles = set()
923
923
924 for f, (m, args, msg) in actions.items():
924 for f, (m, args, msg) in actions.items():
925 if m in ('c', 'dc', 'm', 'cm'):
925 if m in ('c', 'dc', 'm', 'cm'):
926 # This action may create a new local file.
926 # This action may create a new local file.
927 createdfiledirs.update(util.finddirs(f))
927 createdfiledirs.update(util.finddirs(f))
928 if mf.hasdir(f):
928 if mf.hasdir(f):
929 # The file aliases a local directory. This might be ok if all
929 # The file aliases a local directory. This might be ok if all
930 # the files in the local directory are being deleted. This
930 # the files in the local directory are being deleted. This
931 # will be checked once we know what all the deleted files are.
931 # will be checked once we know what all the deleted files are.
932 remoteconflicts.add(f)
932 remoteconflicts.add(f)
933 # Track the names of all deleted files.
933 # Track the names of all deleted files.
934 if m == 'r':
934 if m == 'r':
935 deletedfiles.add(f)
935 deletedfiles.add(f)
936 if m == 'm':
936 if m == 'm':
937 f1, f2, fa, move, anc = args
937 f1, f2, fa, move, anc = args
938 if move:
938 if move:
939 deletedfiles.add(f1)
939 deletedfiles.add(f1)
940 if m == 'dm':
940 if m == 'dm':
941 f2, flags = args
941 f2, flags = args
942 deletedfiles.add(f2)
942 deletedfiles.add(f2)
943
943
944 # Check all directories that contain created files for path conflicts.
944 # Check all directories that contain created files for path conflicts.
945 for p in createdfiledirs:
945 for p in createdfiledirs:
946 if p in mf:
946 if p in mf:
947 if p in mctx:
947 if p in mctx:
948 # A file is in a directory which aliases both a local
948 # A file is in a directory which aliases both a local
949 # and a remote file. This is an internal inconsistency
949 # and a remote file. This is an internal inconsistency
950 # within the remote manifest.
950 # within the remote manifest.
951 invalidconflicts.add(p)
951 invalidconflicts.add(p)
952 else:
952 else:
953 # A file is in a directory which aliases a local file.
953 # A file is in a directory which aliases a local file.
954 # We will need to rename the local file.
954 # We will need to rename the local file.
955 localconflicts.add(p)
955 localconflicts.add(p)
956 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
956 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
957 # The file is in a directory which aliases a remote file.
957 # The file is in a directory which aliases a remote file.
958 # This is an internal inconsistency within the remote
958 # This is an internal inconsistency within the remote
959 # manifest.
959 # manifest.
960 invalidconflicts.add(p)
960 invalidconflicts.add(p)
961
961
962 # Rename all local conflicting files that have not been deleted.
962 # Rename all local conflicting files that have not been deleted.
963 for p in localconflicts:
963 for p in localconflicts:
964 if p not in deletedfiles:
964 if p not in deletedfiles:
965 ctxname = str(wctx).rstrip('+')
965 ctxname = str(wctx).rstrip('+')
966 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
966 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
967 actions[pnew] = ('pr', (p,), "local path conflict")
967 actions[pnew] = ('pr', (p,), "local path conflict")
968 actions[p] = ('p', (pnew, 'l'), "path conflict")
968 actions[p] = ('p', (pnew, 'l'), "path conflict")
969
969
970 if remoteconflicts:
970 if remoteconflicts:
971 # Check if all files in the conflicting directories have been removed.
971 # Check if all files in the conflicting directories have been removed.
972 ctxname = str(mctx).rstrip('+')
972 ctxname = str(mctx).rstrip('+')
973 for f, p in _filesindirs(repo, mf, remoteconflicts):
973 for f, p in _filesindirs(repo, mf, remoteconflicts):
974 if f not in deletedfiles:
974 if f not in deletedfiles:
975 m, args, msg = actions[p]
975 m, args, msg = actions[p]
976 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
976 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
977 if m in ('dc', 'm'):
977 if m in ('dc', 'm'):
978 # Action was merge, just update target.
978 # Action was merge, just update target.
979 actions[pnew] = (m, args, msg)
979 actions[pnew] = (m, args, msg)
980 else:
980 else:
981 # Action was create, change to renamed get action.
981 # Action was create, change to renamed get action.
982 fl = args[0]
982 fl = args[0]
983 actions[pnew] = ('dg', (p, fl), "remote path conflict")
983 actions[pnew] = ('dg', (p, fl), "remote path conflict")
984 actions[p] = ('p', (pnew, 'r'), "path conflict")
984 actions[p] = ('p', (pnew, 'r'), "path conflict")
985 remoteconflicts.remove(p)
985 remoteconflicts.remove(p)
986 break
986 break
987
987
988 if invalidconflicts:
988 if invalidconflicts:
989 for p in invalidconflicts:
989 for p in invalidconflicts:
990 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
990 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
991 raise error.Abort(_("destination manifest contains path conflicts"))
991 raise error.Abort(_("destination manifest contains path conflicts"))
992
992
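# Illustrative sketch (not part of merge.py): how a local path conflict is
# recorded as a ('pr', 'p') action pair, with a trivial "~<suffix>" rename
# standing in for util.safename and a plain dict of actions.
def _record_local_pathconflict_sketch(actions, path, ctxname='local'):
    pnew = '%s~%s' % (path, ctxname)        # stand-in for util.safename()
    actions[pnew] = ('pr', (path,), "local path conflict")
    actions[path] = ('p', (pnew, 'l'), "path conflict")
    return pnew

# actions = {}
# _record_local_pathconflict_sketch(actions, 'dir/file')
# -> 'dir/file' is marked as a path conflict ('p') and its content will be
#    moved aside to 'dir/file~local' by the 'pr' action during applyupdates.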
993 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
993 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
994 acceptremote, followcopies, forcefulldiff=False):
994 acceptremote, followcopies, forcefulldiff=False):
995 """
995 """
996 Merge wctx and p2 with ancestor pa and generate merge action list
996 Merge wctx and p2 with ancestor pa and generate merge action list
997
997
998 branchmerge and force are as passed in to update
998 branchmerge and force are as passed in to update
999 matcher = matcher to filter file lists
999 matcher = matcher to filter file lists
1000 acceptremote = accept the incoming changes without prompting
1000 acceptremote = accept the incoming changes without prompting
1001 """
1001 """
1002 if matcher is not None and matcher.always():
1002 if matcher is not None and matcher.always():
1003 matcher = None
1003 matcher = None
1004
1004
1005 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1005 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1006
1006
1007 # manifests fetched in order are going to be faster, so prime the caches
1007 # manifests fetched in order are going to be faster, so prime the caches
1008 [x.manifest() for x in
1008 [x.manifest() for x in
1009 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1009 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1010
1010
1011 if followcopies:
1011 if followcopies:
1012 ret = copies.mergecopies(repo, wctx, p2, pa)
1012 ret = copies.mergecopies(repo, wctx, p2, pa)
1013 copy, movewithdir, diverge, renamedelete, dirmove = ret
1013 copy, movewithdir, diverge, renamedelete, dirmove = ret
1014
1014
1015 boolbm = pycompat.bytestr(bool(branchmerge))
1015 boolbm = pycompat.bytestr(bool(branchmerge))
1016 boolf = pycompat.bytestr(bool(force))
1016 boolf = pycompat.bytestr(bool(force))
1017 boolm = pycompat.bytestr(bool(matcher))
1017 boolm = pycompat.bytestr(bool(matcher))
1018 repo.ui.note(_("resolving manifests\n"))
1018 repo.ui.note(_("resolving manifests\n"))
1019 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1019 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1020 % (boolbm, boolf, boolm))
1020 % (boolbm, boolf, boolm))
1021 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1021 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1022
1022
1023 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1023 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1024 copied = set(copy.values())
1024 copied = set(copy.values())
1025 copied.update(movewithdir.values())
1025 copied.update(movewithdir.values())
1026
1026
1027 if '.hgsubstate' in m1:
1027 if '.hgsubstate' in m1:
1028 # check whether sub state is modified
1028 # check whether sub state is modified
1029 if any(wctx.sub(s).dirty() for s in wctx.substate):
1029 if any(wctx.sub(s).dirty() for s in wctx.substate):
1030 m1['.hgsubstate'] = modifiednodeid
1030 m1['.hgsubstate'] = modifiednodeid
1031
1031
1032 # Don't use m2-vs-ma optimization if:
1032 # Don't use m2-vs-ma optimization if:
1033 # - ma is the same as m1 or m2, which we're just going to diff again later
1033 # - ma is the same as m1 or m2, which we're just going to diff again later
1034 # - The caller specifically asks for a full diff, which is useful during bid
1034 # - The caller specifically asks for a full diff, which is useful during bid
1035 # merge.
1035 # merge.
1036 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1036 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1037 # Identify which files are relevant to the merge, so we can limit the
1037 # Identify which files are relevant to the merge, so we can limit the
1038 # total m1-vs-m2 diff to just those files. This has significant
1038 # total m1-vs-m2 diff to just those files. This has significant
1039 # performance benefits in large repositories.
1039 # performance benefits in large repositories.
1040 relevantfiles = set(ma.diff(m2).keys())
1040 relevantfiles = set(ma.diff(m2).keys())
1041
1041
1042 # For copied and moved files, we need to add the source file too.
1042 # For copied and moved files, we need to add the source file too.
1043 for copykey, copyvalue in copy.iteritems():
1043 for copykey, copyvalue in copy.iteritems():
1044 if copyvalue in relevantfiles:
1044 if copyvalue in relevantfiles:
1045 relevantfiles.add(copykey)
1045 relevantfiles.add(copykey)
1046 for movedirkey in movewithdir:
1046 for movedirkey in movewithdir:
1047 relevantfiles.add(movedirkey)
1047 relevantfiles.add(movedirkey)
1048 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1048 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1049 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1049 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1050
1050
1051 diff = m1.diff(m2, match=matcher)
1051 diff = m1.diff(m2, match=matcher)
1052
1052
1053 if matcher is None:
1053 if matcher is None:
1054 matcher = matchmod.always('', '')
1054 matcher = matchmod.always('', '')
1055
1055
1056 actions = {}
1056 actions = {}
1057 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1057 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1058 if n1 and n2: # file exists on both local and remote side
1058 if n1 and n2: # file exists on both local and remote side
1059 if f not in ma:
1059 if f not in ma:
1060 fa = copy.get(f, None)
1060 fa = copy.get(f, None)
1061 if fa is not None:
1061 if fa is not None:
1062 actions[f] = ('m', (f, f, fa, False, pa.node()),
1062 actions[f] = ('m', (f, f, fa, False, pa.node()),
1063 "both renamed from " + fa)
1063 "both renamed from " + fa)
1064 else:
1064 else:
1065 actions[f] = ('m', (f, f, None, False, pa.node()),
1065 actions[f] = ('m', (f, f, None, False, pa.node()),
1066 "both created")
1066 "both created")
1067 else:
1067 else:
1068 a = ma[f]
1068 a = ma[f]
1069 fla = ma.flags(f)
1069 fla = ma.flags(f)
1070 nol = 'l' not in fl1 + fl2 + fla
1070 nol = 'l' not in fl1 + fl2 + fla
1071 if n2 == a and fl2 == fla:
1071 if n2 == a and fl2 == fla:
1072 actions[f] = ('k', (), "remote unchanged")
1072 actions[f] = ('k', (), "remote unchanged")
1073 elif n1 == a and fl1 == fla: # local unchanged - use remote
1073 elif n1 == a and fl1 == fla: # local unchanged - use remote
1074 if n1 == n2: # optimization: keep local content
1074 if n1 == n2: # optimization: keep local content
1075 actions[f] = ('e', (fl2,), "update permissions")
1075 actions[f] = ('e', (fl2,), "update permissions")
1076 else:
1076 else:
1077 actions[f] = ('g', (fl2, False), "remote is newer")
1077 actions[f] = ('g', (fl2, False), "remote is newer")
1078 elif nol and n2 == a: # remote only changed 'x'
1078 elif nol and n2 == a: # remote only changed 'x'
1079 actions[f] = ('e', (fl2,), "update permissions")
1079 actions[f] = ('e', (fl2,), "update permissions")
1080 elif nol and n1 == a: # local only changed 'x'
1080 elif nol and n1 == a: # local only changed 'x'
1081 actions[f] = ('g', (fl1, False), "remote is newer")
1081 actions[f] = ('g', (fl1, False), "remote is newer")
1082 else: # both changed something
1082 else: # both changed something
1083 actions[f] = ('m', (f, f, f, False, pa.node()),
1083 actions[f] = ('m', (f, f, f, False, pa.node()),
1084 "versions differ")
1084 "versions differ")
1085 elif n1: # file exists only on local side
1085 elif n1: # file exists only on local side
1086 if f in copied:
1086 if f in copied:
1087 pass # we'll deal with it on m2 side
1087 pass # we'll deal with it on m2 side
1088 elif f in movewithdir: # directory rename, move local
1088 elif f in movewithdir: # directory rename, move local
1089 f2 = movewithdir[f]
1089 f2 = movewithdir[f]
1090 if f2 in m2:
1090 if f2 in m2:
1091 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1091 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1092 "remote directory rename, both created")
1092 "remote directory rename, both created")
1093 else:
1093 else:
1094 actions[f2] = ('dm', (f, fl1),
1094 actions[f2] = ('dm', (f, fl1),
1095 "remote directory rename - move from " + f)
1095 "remote directory rename - move from " + f)
1096 elif f in copy:
1096 elif f in copy:
1097 f2 = copy[f]
1097 f2 = copy[f]
1098 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1098 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1099 "local copied/moved from " + f2)
1099 "local copied/moved from " + f2)
1100 elif f in ma: # clean, a different, no remote
1100 elif f in ma: # clean, a different, no remote
1101 if n1 != ma[f]:
1101 if n1 != ma[f]:
1102 if acceptremote:
1102 if acceptremote:
1103 actions[f] = ('r', None, "remote delete")
1103 actions[f] = ('r', None, "remote delete")
1104 else:
1104 else:
1105 actions[f] = ('cd', (f, None, f, False, pa.node()),
1105 actions[f] = ('cd', (f, None, f, False, pa.node()),
1106 "prompt changed/deleted")
1106 "prompt changed/deleted")
1107 elif n1 == addednodeid:
1107 elif n1 == addednodeid:
1108 # This extra 'a' is added by working copy manifest to mark
1108 # This extra 'a' is added by working copy manifest to mark
1109 # the file as locally added. We should forget it instead of
1109 # the file as locally added. We should forget it instead of
1110 # deleting it.
1110 # deleting it.
1111 actions[f] = ('f', None, "remote deleted")
1111 actions[f] = ('f', None, "remote deleted")
1112 else:
1112 else:
1113 actions[f] = ('r', None, "other deleted")
1113 actions[f] = ('r', None, "other deleted")
1114 elif n2: # file exists only on remote side
1114 elif n2: # file exists only on remote side
1115 if f in copied:
1115 if f in copied:
1116 pass # we'll deal with it on m1 side
1116 pass # we'll deal with it on m1 side
1117 elif f in movewithdir:
1117 elif f in movewithdir:
1118 f2 = movewithdir[f]
1118 f2 = movewithdir[f]
1119 if f2 in m1:
1119 if f2 in m1:
1120 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1120 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1121 "local directory rename, both created")
1121 "local directory rename, both created")
1122 else:
1122 else:
1123 actions[f2] = ('dg', (f, fl2),
1123 actions[f2] = ('dg', (f, fl2),
1124 "local directory rename - get from " + f)
1124 "local directory rename - get from " + f)
1125 elif f in copy:
1125 elif f in copy:
1126 f2 = copy[f]
1126 f2 = copy[f]
1127 if f2 in m2:
1127 if f2 in m2:
1128 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1128 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1129 "remote copied from " + f2)
1129 "remote copied from " + f2)
1130 else:
1130 else:
1131 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1131 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1132 "remote moved from " + f2)
1132 "remote moved from " + f2)
1133 elif f not in ma:
1133 elif f not in ma:
1134 # local unknown, remote created: the logic is described by the
1134 # local unknown, remote created: the logic is described by the
1135 # following table:
1135 # following table:
1136 #
1136 #
1137 # force branchmerge different | action
1137 # force branchmerge different | action
1138 # n * * | create
1138 # n * * | create
1139 # y n * | create
1139 # y n * | create
1140 # y y n | create
1140 # y y n | create
1141 # y y y | merge
1141 # y y y | merge
1142 #
1142 #
1143 # Checking whether the files are different is expensive, so we
1143 # Checking whether the files are different is expensive, so we
1144 # don't do that when we can avoid it.
1144 # don't do that when we can avoid it.
1145 if not force:
1145 if not force:
1146 actions[f] = ('c', (fl2,), "remote created")
1146 actions[f] = ('c', (fl2,), "remote created")
1147 elif not branchmerge:
1147 elif not branchmerge:
1148 actions[f] = ('c', (fl2,), "remote created")
1148 actions[f] = ('c', (fl2,), "remote created")
1149 else:
1149 else:
1150 actions[f] = ('cm', (fl2, pa.node()),
1150 actions[f] = ('cm', (fl2, pa.node()),
1151 "remote created, get or merge")
1151 "remote created, get or merge")
1152 elif n2 != ma[f]:
1152 elif n2 != ma[f]:
1153 df = None
1153 df = None
1154 for d in dirmove:
1154 for d in dirmove:
1155 if f.startswith(d):
1155 if f.startswith(d):
1156 # new file added in a directory that was moved
1156 # new file added in a directory that was moved
1157 df = dirmove[d] + f[len(d):]
1157 df = dirmove[d] + f[len(d):]
1158 break
1158 break
1159 if df is not None and df in m1:
1159 if df is not None and df in m1:
1160 actions[df] = ('m', (df, f, f, False, pa.node()),
1160 actions[df] = ('m', (df, f, f, False, pa.node()),
1161 "local directory rename - respect move from " + f)
1161 "local directory rename - respect move from " + f)
1162 elif acceptremote:
1162 elif acceptremote:
1163 actions[f] = ('c', (fl2,), "remote recreating")
1163 actions[f] = ('c', (fl2,), "remote recreating")
1164 else:
1164 else:
1165 actions[f] = ('dc', (None, f, f, False, pa.node()),
1165 actions[f] = ('dc', (None, f, f, False, pa.node()),
1166 "prompt deleted/changed")
1166 "prompt deleted/changed")
1167
1167
1168 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1168 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1169 # If we are merging, look for path conflicts.
1169 # If we are merging, look for path conflicts.
1170 checkpathconflicts(repo, wctx, p2, actions)
1170 checkpathconflicts(repo, wctx, p2, actions)
1171
1171
1172 return actions, diverge, renamedelete
1172 return actions, diverge, renamedelete
1173
1173
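# Illustrative sketch (not part of merge.py): the shape of the table that
# manifestmerge returns -- a dict mapping filename to (action code, args,
# message). Filenames and args below are fabricated (None stands in for
# pa.node()); the codes are the ones used above: 'g' get, 'm' merge, 'k'
# keep, 'e' flags-only, 'r' remove, 'f' forget, 'c'/'cm' create, 'cd'/'dc'
# change-delete prompts, 'dm'/'dg' directory-rename move/get.
sample_actions = {
    'changed.txt': ('g', ('', False), "remote is newer"),
    'both.txt': ('m', ('both.txt', 'both.txt', 'both.txt', False, None),
                 "versions differ"),
    'gone.txt': ('r', None, "other deleted"),
    'kept.txt': ('k', (), "remote unchanged"),
}
for f, (m, args, msg) in sorted(sample_actions.items()):
    print(' %s: %s -> %s' % (f, msg, m))    # mirrors the debug output format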
1174 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1174 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1175 """Resolves false conflicts where the nodeid changed but the content
1175 """Resolves false conflicts where the nodeid changed but the content
1176 remained the same."""
1176 remained the same."""
1177
1177
1178 for f, (m, args, msg) in actions.items():
1178 for f, (m, args, msg) in actions.items():
1179 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1179 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1180 # local did change but ended up with same content
1180 # local did change but ended up with same content
1181 actions[f] = 'r', None, "prompt same"
1181 actions[f] = 'r', None, "prompt same"
1182 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1182 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1183 # remote did change but ended up with same content
1183 # remote did change but ended up with same content
1184 del actions[f] # don't get = keep local deleted
1184 del actions[f] # don't get = keep local deleted
1185
1185
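# Illustrative sketch (not part of merge.py): the trivial-resolution rule
# above, with file contents held in plain dicts instead of contexts.
def _resolvetrivial_sketch(actions, local, remote, ancestor):
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and local.get(f) == ancestor[f]:
            actions[f] = ('r', None, "prompt same")   # local change was a no-op
        elif m == 'dc' and f in ancestor and remote.get(f) == ancestor[f]:
            del actions[f]                             # keep the local deletion

# actions = {'a': ('cd', None, ''), 'b': ('dc', None, '')}
# _resolvetrivial_sketch(actions, {'a': 'x'}, {'b': 'y'}, {'a': 'x', 'b': 'y'})
# -> 'a' becomes a plain remove, 'b' is dropped from the table.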
1186 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1186 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1187 acceptremote, followcopies, matcher=None,
1187 acceptremote, followcopies, matcher=None,
1188 mergeforce=False):
1188 mergeforce=False):
1189 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1189 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1190 # Avoid cycle.
1190 # Avoid cycle.
1191 from . import sparse
1191 from . import sparse
1192
1192
1193 if len(ancestors) == 1: # default
1193 if len(ancestors) == 1: # default
1194 actions, diverge, renamedelete = manifestmerge(
1194 actions, diverge, renamedelete = manifestmerge(
1195 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1195 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1196 acceptremote, followcopies)
1196 acceptremote, followcopies)
1197 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1197 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1198
1198
1199 else: # only when merge.preferancestor=* - the default
1199 else: # only when merge.preferancestor=* - the default
1200 repo.ui.note(
1200 repo.ui.note(
1201 _("note: merging %s and %s using bids from ancestors %s\n") %
1201 _("note: merging %s and %s using bids from ancestors %s\n") %
1202 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1202 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1203 for anc in ancestors)))
1203 for anc in ancestors)))
1204
1204
1205 # Call for bids
1205 # Call for bids
1206 fbids = {} # mapping filename to bids (action method to list of actions)
1206 fbids = {} # mapping filename to bids (action method to list of actions)
1207 diverge, renamedelete = None, None
1207 diverge, renamedelete = None, None
1208 for ancestor in ancestors:
1208 for ancestor in ancestors:
1209 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1209 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1210 actions, diverge1, renamedelete1 = manifestmerge(
1210 actions, diverge1, renamedelete1 = manifestmerge(
1211 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1211 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1212 acceptremote, followcopies, forcefulldiff=True)
1212 acceptremote, followcopies, forcefulldiff=True)
1213 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1213 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1214
1214
1215 # Track the shortest set of warnings on the theory that bid
1215 # Track the shortest set of warnings on the theory that bid
1216 # merge will correctly incorporate more information
1216 # merge will correctly incorporate more information
1217 if diverge is None or len(diverge1) < len(diverge):
1217 if diverge is None or len(diverge1) < len(diverge):
1218 diverge = diverge1
1218 diverge = diverge1
1219 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1219 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1220 renamedelete = renamedelete1
1220 renamedelete = renamedelete1
1221
1221
1222 for f, a in sorted(actions.iteritems()):
1222 for f, a in sorted(actions.iteritems()):
1223 m, args, msg = a
1223 m, args, msg = a
1224 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1224 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1225 if f in fbids:
1225 if f in fbids:
1226 d = fbids[f]
1226 d = fbids[f]
1227 if m in d:
1227 if m in d:
1228 d[m].append(a)
1228 d[m].append(a)
1229 else:
1229 else:
1230 d[m] = [a]
1230 d[m] = [a]
1231 else:
1231 else:
1232 fbids[f] = {m: [a]}
1232 fbids[f] = {m: [a]}
1233
1233
1234 # Pick the best bid for each file
1234 # Pick the best bid for each file
1235 repo.ui.note(_('\nauction for merging merge bids\n'))
1235 repo.ui.note(_('\nauction for merging merge bids\n'))
1236 actions = {}
1236 actions = {}
1237 dms = [] # filenames that have dm actions
1237 dms = [] # filenames that have dm actions
1238 for f, bids in sorted(fbids.items()):
1238 for f, bids in sorted(fbids.items()):
1239 # bids is a mapping from action method to list of actions
1239 # bids is a mapping from action method to list of actions
1240 # Consensus?
1240 # Consensus?
1241 if len(bids) == 1: # all bids are the same kind of method
1241 if len(bids) == 1: # all bids are the same kind of method
1242 m, l = list(bids.items())[0]
1242 m, l = list(bids.items())[0]
1243 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1243 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1244 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1244 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1245 actions[f] = l[0]
1245 actions[f] = l[0]
1246 if m == 'dm':
1246 if m == 'dm':
1247 dms.append(f)
1247 dms.append(f)
1248 continue
1248 continue
1249 # If keep is an option, just do it.
1249 # If keep is an option, just do it.
1250 if 'k' in bids:
1250 if 'k' in bids:
1251 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1251 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1252 actions[f] = bids['k'][0]
1252 actions[f] = bids['k'][0]
1253 continue
1253 continue
1254 # If there are gets and they all agree [how could they not?], do it.
1254 # If there are gets and they all agree [how could they not?], do it.
1255 if 'g' in bids:
1255 if 'g' in bids:
1256 ga0 = bids['g'][0]
1256 ga0 = bids['g'][0]
1257 if all(a == ga0 for a in bids['g'][1:]):
1257 if all(a == ga0 for a in bids['g'][1:]):
1258 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1258 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1259 actions[f] = ga0
1259 actions[f] = ga0
1260 continue
1260 continue
1261 # TODO: Consider other simple actions such as mode changes
1261 # TODO: Consider other simple actions such as mode changes
1262 # Handle inefficient democrazy.
1262 # Handle inefficient democrazy.
1263 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1263 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1264 for m, l in sorted(bids.items()):
1264 for m, l in sorted(bids.items()):
1265 for _f, args, msg in l:
1265 for _f, args, msg in l:
1266 repo.ui.note(' %s -> %s\n' % (msg, m))
1266 repo.ui.note(' %s -> %s\n' % (msg, m))
1267 # Pick random action. TODO: Instead, prompt user when resolving
1267 # Pick random action. TODO: Instead, prompt user when resolving
1268 m, l = list(bids.items())[0]
1268 m, l = list(bids.items())[0]
1269 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1269 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1270 (f, m))
1270 (f, m))
1271 actions[f] = l[0]
1271 actions[f] = l[0]
1272 if m == 'dm':
1272 if m == 'dm':
1273 dms.append(f)
1273 dms.append(f)
1274 continue
1274 continue
1275 # Work around 'dm' that can cause multiple actions for the same file
1275 # Work around 'dm' that can cause multiple actions for the same file
1276 for f in dms:
1276 for f in dms:
1277 dm, (f0, flags), msg = actions[f]
1277 dm, (f0, flags), msg = actions[f]
1278 assert dm == 'dm', dm
1278 assert dm == 'dm', dm
1279 if f0 in actions and actions[f0][0] == 'r':
1279 if f0 in actions and actions[f0][0] == 'r':
1280 # We have one bid for removing a file and another for moving it.
1280 # We have one bid for removing a file and another for moving it.
1281 # These two could be merged as first move and then delete ...
1281 # These two could be merged as first move and then delete ...
1282 # but instead drop moving and just delete.
1282 # but instead drop moving and just delete.
1283 del actions[f]
1283 del actions[f]
1284 repo.ui.note(_('end of auction\n\n'))
1284 repo.ui.note(_('end of auction\n\n'))
1285
1285
1286 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1286 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1287
1287
1288 if wctx.rev() is None:
1288 if wctx.rev() is None:
1289 fractions = _forgetremoved(wctx, mctx, branchmerge)
1289 fractions = _forgetremoved(wctx, mctx, branchmerge)
1290 actions.update(fractions)
1290 actions.update(fractions)
1291
1291
1292 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1292 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1293 actions)
1293 actions)
1294
1294
1295 return prunedactions, diverge, renamedelete
1295 return prunedactions, diverge, renamedelete
1296
1296
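# Illustrative sketch (not part of merge.py): the consensus step of the bid
# auction above, on the same {filename: {code: [action, ...]}} shape. The
# 'g'-agreement shortcut and the 'dm' bookkeeping are left out.
def _auction_consensus_sketch(fbids):
    chosen = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                     # every ancestor bid the same code
            code, bidlist = list(bids.items())[0]
            if all(a == bidlist[0] for a in bidlist[1:]):
                chosen[f] = bidlist[0]
                continue
        if 'k' in bids:                        # keeping the file is always safe
            chosen[f] = bids['k'][0]
            continue
        # arbitrary pick, like the "ambiguous merge" fallback above
        chosen[f] = list(bids.values())[0][0]
    return chosen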
1297 def _getcwd():
1297 def _getcwd():
1298 try:
1298 try:
1299 return pycompat.getcwd()
1299 return pycompat.getcwd()
1300 except OSError as err:
1300 except OSError as err:
1301 if err.errno == errno.ENOENT:
1301 if err.errno == errno.ENOENT:
1302 return None
1302 return None
1303 raise
1303 raise
1304
1304
1305 def batchremove(repo, wctx, actions):
1305 def batchremove(repo, wctx, actions):
1306 """apply removes to the working directory
1306 """apply removes to the working directory
1307
1307
1308 yields tuples for progress updates
1308 yields tuples for progress updates
1309 """
1309 """
1310 verbose = repo.ui.verbose
1310 verbose = repo.ui.verbose
1311 cwd = _getcwd()
1311 cwd = _getcwd()
1312 i = 0
1312 i = 0
1313 for f, args, msg in actions:
1313 for f, args, msg in actions:
1314 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1314 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1315 if verbose:
1315 if verbose:
1316 repo.ui.note(_("removing %s\n") % f)
1316 repo.ui.note(_("removing %s\n") % f)
1317 wctx[f].audit()
1317 wctx[f].audit()
1318 try:
1318 try:
1319 wctx[f].remove(ignoremissing=True)
1319 wctx[f].remove(ignoremissing=True)
1320 except OSError as inst:
1320 except OSError as inst:
1321 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1321 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1322 (f, inst.strerror))
1322 (f, inst.strerror))
1323 if i == 100:
1323 if i == 100:
1324 yield i, f
1324 yield i, f
1325 i = 0
1325 i = 0
1326 i += 1
1326 i += 1
1327 if i > 0:
1327 if i > 0:
1328 yield i, f
1328 yield i, f
1329
1329
1330 if cwd and not _getcwd():
1330 if cwd and not _getcwd():
1331 # cwd was removed in the course of removing files; print a helpful
1331 # cwd was removed in the course of removing files; print a helpful
1332 # warning.
1332 # warning.
1333 repo.ui.warn(_("current directory was removed\n"
1333 repo.ui.warn(_("current directory was removed\n"
1334 "(consider changing to repo root: %s)\n") % repo.root)
1334 "(consider changing to repo root: %s)\n") % repo.root)
1335
1335
1336 # It's necessary to flush here in case we're inside a worker fork and will
1337 # quit after this function.
1338 wctx.flushall()
1339
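# Illustrative sketch (not part of merge.py): the progress batching used by
# batchremove above (and batchget below) -- yield a count roughly every 100
# items so the caller can update the progress bar without per-file overhead.
def _batched_progress_sketch(items, chunk=100):
    i = 0
    item = None
    for item in items:
        # ... per-item work would happen here ...
        if i == chunk:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item

# sum(n for n, _ in _batched_progress_sketch(range(250))) == 250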
1340 def batchget(repo, mctx, wctx, actions):
1336 def batchget(repo, mctx, wctx, actions):
1341 """apply gets to the working directory
1337 """apply gets to the working directory
1342
1338
1343 mctx is the context to get from
1339 mctx is the context to get from
1344
1340
1345 yields tuples for progress updates
1341 yields tuples for progress updates
1346 """
1342 """
1347 verbose = repo.ui.verbose
1343 verbose = repo.ui.verbose
1348 fctx = mctx.filectx
1344 fctx = mctx.filectx
1349 ui = repo.ui
1345 ui = repo.ui
1350 i = 0
1346 i = 0
1351 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1347 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1352 for f, (flags, backup), msg in actions:
1348 for f, (flags, backup), msg in actions:
1353 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1349 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1354 if verbose:
1350 if verbose:
1355 repo.ui.note(_("getting %s\n") % f)
1351 repo.ui.note(_("getting %s\n") % f)
1356
1352
1357 if backup:
1353 if backup:
1358 # If a file or directory exists with the same name, back that
1354 # If a file or directory exists with the same name, back that
1359 # up. Otherwise, look to see if there is a file that conflicts
1355 # up. Otherwise, look to see if there is a file that conflicts
1360 # with a directory this file is in, and if so, back that up.
1356 # with a directory this file is in, and if so, back that up.
1361 absf = repo.wjoin(f)
1357 absf = repo.wjoin(f)
1362 if not repo.wvfs.lexists(f):
1358 if not repo.wvfs.lexists(f):
1363 for p in util.finddirs(f):
1359 for p in util.finddirs(f):
1364 if repo.wvfs.isfileorlink(p):
1360 if repo.wvfs.isfileorlink(p):
1365 absf = repo.wjoin(p)
1361 absf = repo.wjoin(p)
1366 break
1362 break
1367 orig = scmutil.origpath(ui, repo, absf)
1363 orig = scmutil.origpath(ui, repo, absf)
1368 if repo.wvfs.lexists(absf):
1364 if repo.wvfs.lexists(absf):
1369 util.rename(absf, orig)
1365 util.rename(absf, orig)
1370 wctx[f].clearunknown()
1366 wctx[f].clearunknown()
1371 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1367 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1372 if i == 100:
1368 if i == 100:
1373 yield i, f
1369 yield i, f
1374 i = 0
1370 i = 0
1375 i += 1
1371 i += 1
1376 if i > 0:
1372 if i > 0:
1377 yield i, f
1373 yield i, f
1378
1374
1379 # It's necessary to flush here in case we're inside a worker fork and will
1380 # quit after this function.
1381 wctx.flushall()
1382
1375
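# Illustrative sketch (not part of merge.py): the backup target batchget picks
# before writing a file -- the path itself if something already exists there,
# otherwise the closest parent that exists as a file or symlink. Plain sets
# stand in for the working-directory vfs checks; the real code then renames
# that path to a scmutil.origpath() backup (typically "<name>.orig") if it
# exists on disk.
def _backup_target_sketch(f, existing_paths, file_or_link_paths):
    if f in existing_paths:
        return f
    parts = f.split('/')
    for i in range(len(parts) - 1, 0, -1):     # longest parent prefix first
        parent = '/'.join(parts[:i])
        if parent in file_or_link_paths:
            return parent
    return f

# _backup_target_sketch('dir/new.txt', set(), {'dir'}) -> 'dir'
# (a file named "dir" blocks the directory, so that is what gets backed up)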
1383 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1376 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1384 """apply the merge action list to the working directory
1377 """apply the merge action list to the working directory
1385
1378
1386 wctx is the working copy context
1379 wctx is the working copy context
1387 mctx is the context to be merged into the working copy
1380 mctx is the context to be merged into the working copy
1388
1381
1389 Return a tuple of counts (updated, merged, removed, unresolved) that
1382 Return a tuple of counts (updated, merged, removed, unresolved) that
1390 describes how many files were affected by the update.
1383 describes how many files were affected by the update.
1391 """
1384 """
1392
1385
1393 updated, merged, removed = 0, 0, 0
1386 updated, merged, removed = 0, 0, 0
1394 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1387 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1395 moves = []
1388 moves = []
1396 for m, l in actions.items():
1389 for m, l in actions.items():
1397 l.sort()
1390 l.sort()
1398
1391
1399 # 'cd' and 'dc' actions are treated like other merge conflicts
1392 # 'cd' and 'dc' actions are treated like other merge conflicts
1400 mergeactions = sorted(actions['cd'])
1393 mergeactions = sorted(actions['cd'])
1401 mergeactions.extend(sorted(actions['dc']))
1394 mergeactions.extend(sorted(actions['dc']))
1402 mergeactions.extend(actions['m'])
1395 mergeactions.extend(actions['m'])
1403 for f, args, msg in mergeactions:
1396 for f, args, msg in mergeactions:
1404 f1, f2, fa, move, anc = args
1397 f1, f2, fa, move, anc = args
1405 if f == '.hgsubstate': # merged internally
1398 if f == '.hgsubstate': # merged internally
1406 continue
1399 continue
1407 if f1 is None:
1400 if f1 is None:
1408 fcl = filemerge.absentfilectx(wctx, fa)
1401 fcl = filemerge.absentfilectx(wctx, fa)
1409 else:
1402 else:
1410 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1403 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1411 fcl = wctx[f1]
1404 fcl = wctx[f1]
1412 if f2 is None:
1405 if f2 is None:
1413 fco = filemerge.absentfilectx(mctx, fa)
1406 fco = filemerge.absentfilectx(mctx, fa)
1414 else:
1407 else:
1415 fco = mctx[f2]
1408 fco = mctx[f2]
1416 actx = repo[anc]
1409 actx = repo[anc]
1417 if fa in actx:
1410 if fa in actx:
1418 fca = actx[fa]
1411 fca = actx[fa]
1419 else:
1412 else:
1420 # TODO: move to absentfilectx
1413 # TODO: move to absentfilectx
1421 fca = repo.filectx(f1, fileid=nullrev)
1414 fca = repo.filectx(f1, fileid=nullrev)
1422 ms.add(fcl, fco, fca, f)
1415 ms.add(fcl, fco, fca, f)
1423 if f1 != f and move:
1416 if f1 != f and move:
1424 moves.append(f1)
1417 moves.append(f1)
1425
1418
1426 _updating = _('updating')
1419 _updating = _('updating')
1427 _files = _('files')
1420 _files = _('files')
1428 progress = repo.ui.progress
1421 progress = repo.ui.progress
1429
1422
1430 # remove renamed files after safely stored
1423 # remove renamed files after safely stored
1431 for f in moves:
1424 for f in moves:
1432 if wctx[f].lexists():
1425 if wctx[f].lexists():
1433 repo.ui.debug("removing %s\n" % f)
1426 repo.ui.debug("removing %s\n" % f)
1434 wctx[f].audit()
1427 wctx[f].audit()
1435 wctx[f].remove()
1428 wctx[f].remove()
1436
1429
1437 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1430 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1438 z = 0
1431 z = 0
1439
1432
1440 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1433 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1441 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1434 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1442
1435
1443 # record path conflicts
1436 # record path conflicts
1444 for f, args, msg in actions['p']:
1437 for f, args, msg in actions['p']:
1445 f1, fo = args
1438 f1, fo = args
1446 s = repo.ui.status
1439 s = repo.ui.status
1447 s(_("%s: path conflict - a file or link has the same name as a "
1440 s(_("%s: path conflict - a file or link has the same name as a "
1448 "directory\n") % f)
1441 "directory\n") % f)
1449 if fo == 'l':
1442 if fo == 'l':
1450 s(_("the local file has been renamed to %s\n") % f1)
1443 s(_("the local file has been renamed to %s\n") % f1)
1451 else:
1444 else:
1452 s(_("the remote file has been renamed to %s\n") % f1)
1445 s(_("the remote file has been renamed to %s\n") % f1)
1453 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1446 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1454 ms.addpath(f, f1, fo)
1447 ms.addpath(f, f1, fo)
1455 z += 1
1448 z += 1
1456 progress(_updating, z, item=f, total=numupdates, unit=_files)
1449 progress(_updating, z, item=f, total=numupdates, unit=_files)
1457
1450
1458 # When merging in-memory, we can't support worker processes, so set the
1451 # When merging in-memory, we can't support worker processes, so set the
1459 # per-item cost at 0 in that case.
1452 # per-item cost at 0 in that case.
1460 cost = 0 if wctx.isinmemory() else 0.001
1453 cost = 0 if wctx.isinmemory() else 0.001
1461
1454
1462 # remove in parallel (must come before resolving path conflicts and getting)
1455 # remove in parallel (must come before resolving path conflicts and getting)
1463 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1456 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1464 actions['r'])
1457 actions['r'])
1465 for i, item in prog:
1458 for i, item in prog:
1466 z += i
1459 z += i
1467 progress(_updating, z, item=item, total=numupdates, unit=_files)
1460 progress(_updating, z, item=item, total=numupdates, unit=_files)
1468 removed = len(actions['r'])
1461 removed = len(actions['r'])
1469
1462
1470 # resolve path conflicts (must come before getting)
1463 # resolve path conflicts (must come before getting)
1471 for f, args, msg in actions['pr']:
1464 for f, args, msg in actions['pr']:
1472 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1465 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1473 f0, = args
1466 f0, = args
1474 if wctx[f0].lexists():
1467 if wctx[f0].lexists():
1475 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1468 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1476 wctx[f].audit()
1469 wctx[f].audit()
1477 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1470 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1478 wctx[f0].remove()
1471 wctx[f0].remove()
1479 z += 1
1472 z += 1
1480 progress(_updating, z, item=f, total=numupdates, unit=_files)
1473 progress(_updating, z, item=f, total=numupdates, unit=_files)
1481
1474
1482 # We should flush before forking into worker processes, since those workers
1483 # flush when they complete, and we don't want to duplicate work.
1484 wctx.flushall()
1485
1486 # get in parallel
1475 # get in parallel
1487 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1476 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1488 actions['g'])
1477 actions['g'])
1489 for i, item in prog:
1478 for i, item in prog:
1490 z += i
1479 z += i
1491 progress(_updating, z, item=item, total=numupdates, unit=_files)
1480 progress(_updating, z, item=item, total=numupdates, unit=_files)
1492 updated = len(actions['g'])
1481 updated = len(actions['g'])
1493
1482
1494 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1483 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1495 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1484 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1496
1485
1497 # forget (manifest only, just log it) (must come first)
1486 # forget (manifest only, just log it) (must come first)
1498 for f, args, msg in actions['f']:
1487 for f, args, msg in actions['f']:
1499 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1488 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1500 z += 1
1489 z += 1
1501 progress(_updating, z, item=f, total=numupdates, unit=_files)
1490 progress(_updating, z, item=f, total=numupdates, unit=_files)
1502
1491
1503 # re-add (manifest only, just log it)
1492 # re-add (manifest only, just log it)
1504 for f, args, msg in actions['a']:
1493 for f, args, msg in actions['a']:
1505 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1494 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1506 z += 1
1495 z += 1
1507 progress(_updating, z, item=f, total=numupdates, unit=_files)
1496 progress(_updating, z, item=f, total=numupdates, unit=_files)
1508
1497
1509 # re-add/mark as modified (manifest only, just log it)
1498 # re-add/mark as modified (manifest only, just log it)
1510 for f, args, msg in actions['am']:
1499 for f, args, msg in actions['am']:
1511 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1500 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1512 z += 1
1501 z += 1
1513 progress(_updating, z, item=f, total=numupdates, unit=_files)
1502 progress(_updating, z, item=f, total=numupdates, unit=_files)
1514
1503
1515 # keep (noop, just log it)
1504 # keep (noop, just log it)
1516 for f, args, msg in actions['k']:
1505 for f, args, msg in actions['k']:
1517 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1506 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1518 # no progress
1507 # no progress
1519
1508
1520 # directory rename, move local
1509 # directory rename, move local
1521 for f, args, msg in actions['dm']:
1510 for f, args, msg in actions['dm']:
1522 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1511 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1523 z += 1
1512 z += 1
1524 progress(_updating, z, item=f, total=numupdates, unit=_files)
1513 progress(_updating, z, item=f, total=numupdates, unit=_files)
1525 f0, flags = args
1514 f0, flags = args
1526 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1515 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1527 wctx[f].audit()
1516 wctx[f].audit()
1528 wctx[f].write(wctx.filectx(f0).data(), flags)
1517 wctx[f].write(wctx.filectx(f0).data(), flags)
1529 wctx[f0].remove()
1518 wctx[f0].remove()
1530 updated += 1
1519 updated += 1
1531
1520
1532 # local directory rename, get
1521 # local directory rename, get
1533 for f, args, msg in actions['dg']:
1522 for f, args, msg in actions['dg']:
1534 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1523 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1535 z += 1
1524 z += 1
1536 progress(_updating, z, item=f, total=numupdates, unit=_files)
1525 progress(_updating, z, item=f, total=numupdates, unit=_files)
1537 f0, flags = args
1526 f0, flags = args
1538 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1527 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1539 wctx[f].write(mctx.filectx(f0).data(), flags)
1528 wctx[f].write(mctx.filectx(f0).data(), flags)
1540 updated += 1
1529 updated += 1
1541
1530
1542 # exec
1531 # exec
1543 for f, args, msg in actions['e']:
1532 for f, args, msg in actions['e']:
1544 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1533 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1545 z += 1
1534 z += 1
1546 progress(_updating, z, item=f, total=numupdates, unit=_files)
1535 progress(_updating, z, item=f, total=numupdates, unit=_files)
1547 flags, = args
1536 flags, = args
1548 wctx[f].audit()
1537 wctx[f].audit()
1549 wctx[f].setflags('l' in flags, 'x' in flags)
1538 wctx[f].setflags('l' in flags, 'x' in flags)
1550 updated += 1
1539 updated += 1
1551
1540
1552 # the ordering is important here -- ms.mergedriver will raise if the merge
1541 # the ordering is important here -- ms.mergedriver will raise if the merge
1553 # driver has changed, and we want to be able to bypass it when overwrite is
1542 # driver has changed, and we want to be able to bypass it when overwrite is
1554 # True
1543 # True
1555 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1544 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1556
1545
1557 if usemergedriver:
1546 if usemergedriver:
1558 ms.commit()
1547 ms.commit()
1559 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1548 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1560 # the driver might leave some files unresolved
1549 # the driver might leave some files unresolved
1561 unresolvedf = set(ms.unresolved())
1550 unresolvedf = set(ms.unresolved())
1562 if not proceed:
1551 if not proceed:
1563 # XXX setting unresolved to at least 1 is a hack to make sure we
1552 # XXX setting unresolved to at least 1 is a hack to make sure we
1564 # error out
1553 # error out
1565 return updated, merged, removed, max(len(unresolvedf), 1)
1554 return updated, merged, removed, max(len(unresolvedf), 1)
1566 newactions = []
1555 newactions = []
1567 for f, args, msg in mergeactions:
1556 for f, args, msg in mergeactions:
1568 if f in unresolvedf:
1557 if f in unresolvedf:
1569 newactions.append((f, args, msg))
1558 newactions.append((f, args, msg))
1570 mergeactions = newactions
1559 mergeactions = newactions
1571
1560
1572 try:
1561 try:
1573 # premerge
1562 # premerge
1574 tocomplete = []
1563 tocomplete = []
1575 for f, args, msg in mergeactions:
1564 for f, args, msg in mergeactions:
1576 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1565 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1577 z += 1
1566 z += 1
1578 progress(_updating, z, item=f, total=numupdates, unit=_files)
1567 progress(_updating, z, item=f, total=numupdates, unit=_files)
1579 if f == '.hgsubstate': # subrepo states need updating
1568 if f == '.hgsubstate': # subrepo states need updating
1580 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1569 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1581 overwrite, labels)
1570 overwrite, labels)
1582 continue
1571 continue
1583 wctx[f].audit()
1572 wctx[f].audit()
1584 complete, r = ms.preresolve(f, wctx)
1573 complete, r = ms.preresolve(f, wctx)
1585 if not complete:
1574 if not complete:
1586 numupdates += 1
1575 numupdates += 1
1587 tocomplete.append((f, args, msg))
1576 tocomplete.append((f, args, msg))
1588
1577
1589 # merge
1578 # merge
1590 for f, args, msg in tocomplete:
1579 for f, args, msg in tocomplete:
1591 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1580 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1592 z += 1
1581 z += 1
1593 progress(_updating, z, item=f, total=numupdates, unit=_files)
1582 progress(_updating, z, item=f, total=numupdates, unit=_files)
1594 ms.resolve(f, wctx)
1583 ms.resolve(f, wctx)
1595
1584
1596 finally:
1585 finally:
1597 ms.commit()
1586 ms.commit()
1598
1587
1599 unresolved = ms.unresolvedcount()
1588 unresolved = ms.unresolvedcount()
1600
1589
1601 if usemergedriver and not unresolved and ms.mdstate() != 's':
1590 if usemergedriver and not unresolved and ms.mdstate() != 's':
1602 if not driverconclude(repo, ms, wctx, labels=labels):
1591 if not driverconclude(repo, ms, wctx, labels=labels):
1603 # XXX setting unresolved to at least 1 is a hack to make sure we
1592 # XXX setting unresolved to at least 1 is a hack to make sure we
1604 # error out
1593 # error out
1605 unresolved = max(unresolved, 1)
1594 unresolved = max(unresolved, 1)
1606
1595
1607 ms.commit()
1596 ms.commit()
1608
1597
1609 msupdated, msmerged, msremoved = ms.counts()
1598 msupdated, msmerged, msremoved = ms.counts()
1610 updated += msupdated
1599 updated += msupdated
1611 merged += msmerged
1600 merged += msmerged
1612 removed += msremoved
1601 removed += msremoved
1613
1602
1614 extraactions = ms.actions()
1603 extraactions = ms.actions()
1615 if extraactions:
1604 if extraactions:
1616 mfiles = set(a[0] for a in actions['m'])
1605 mfiles = set(a[0] for a in actions['m'])
1617 for k, acts in extraactions.iteritems():
1606 for k, acts in extraactions.iteritems():
1618 actions[k].extend(acts)
1607 actions[k].extend(acts)
1619 # Remove these files from actions['m'] as well. This is important
1608 # Remove these files from actions['m'] as well. This is important
1620 # because in recordupdates, files in actions['m'] are processed
1609 # because in recordupdates, files in actions['m'] are processed
1621 # after files in other actions, and the merge driver might add
1610 # after files in other actions, and the merge driver might add
1622 # files to those actions via extraactions above. This can lead to a
1611 # files to those actions via extraactions above. This can lead to a
1623 # file being recorded twice, with poor results. This is especially
1612 # file being recorded twice, with poor results. This is especially
1624 # problematic for actions['r'] (currently only possible with the
1613 # problematic for actions['r'] (currently only possible with the
1625 # merge driver in the initial merge process; interrupted merges
1614 # merge driver in the initial merge process; interrupted merges
1626 # don't go through this flow).
1615 # don't go through this flow).
1627 #
1616 #
1628 # The real fix here is to have indexes by both file and action so
1617 # The real fix here is to have indexes by both file and action so
1629 # that when the action for a file is changed it is automatically
1618 # that when the action for a file is changed it is automatically
1630 # reflected in the other action lists. But that involves a more
1619 # reflected in the other action lists. But that involves a more
1631 # complex data structure, so this will do for now.
1620 # complex data structure, so this will do for now.
1632 #
1621 #
1633 # We don't need to do the same operation for 'dc' and 'cd' because
1622 # We don't need to do the same operation for 'dc' and 'cd' because
1634 # those lists aren't consulted again.
1623 # those lists aren't consulted again.
1635 mfiles.difference_update(a[0] for a in acts)
1624 mfiles.difference_update(a[0] for a in acts)
1636
1625
1637 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1626 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1638
1627
1639 progress(_updating, None, total=numupdates, unit=_files)
1628 progress(_updating, None, total=numupdates, unit=_files)
1640
1629
1641 return updated, merged, removed, unresolved
1630 return updated, merged, removed, unresolved
1642
1631
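# Illustrative sketch (not part of merge.py): what a caller does with the
# 4-tuple applyupdates returns; roughly the summary hg prints once
# recordupdates has run.
def _summarize_stats_sketch(stats):
    updated, merged, removed, unresolved = stats
    return ('%d files updated, %d files merged, '
            '%d files removed, %d files unresolved'
            % (updated, merged, removed, unresolved))

# _summarize_stats_sketch((3, 1, 0, 0))
# -> '3 files updated, 1 files merged, 0 files removed, 0 files unresolved'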
1643 def recordupdates(repo, actions, branchmerge):
1632 def recordupdates(repo, actions, branchmerge):
1644 "record merge actions to the dirstate"
1633 "record merge actions to the dirstate"
1645 # remove (must come first)
1634 # remove (must come first)
1646 for f, args, msg in actions.get('r', []):
1635 for f, args, msg in actions.get('r', []):
1647 if branchmerge:
1636 if branchmerge:
1648 repo.dirstate.remove(f)
1637 repo.dirstate.remove(f)
1649 else:
1638 else:
1650 repo.dirstate.drop(f)
1639 repo.dirstate.drop(f)
1651
1640
1652 # forget (must come first)
1641 # forget (must come first)
1653 for f, args, msg in actions.get('f', []):
1642 for f, args, msg in actions.get('f', []):
1654 repo.dirstate.drop(f)
1643 repo.dirstate.drop(f)
1655
1644
1656 # resolve path conflicts
1645 # resolve path conflicts
1657 for f, args, msg in actions.get('pr', []):
1646 for f, args, msg in actions.get('pr', []):
1658 f0, = args
1647 f0, = args
1659 origf0 = repo.dirstate.copied(f0) or f0
1648 origf0 = repo.dirstate.copied(f0) or f0
1660 repo.dirstate.add(f)
1649 repo.dirstate.add(f)
1661 repo.dirstate.copy(origf0, f)
1650 repo.dirstate.copy(origf0, f)
1662 if f0 == origf0:
1651 if f0 == origf0:
1663 repo.dirstate.remove(f0)
1652 repo.dirstate.remove(f0)
1664 else:
1653 else:
1665 repo.dirstate.drop(f0)
1654 repo.dirstate.drop(f0)
1666
1655
1667 # re-add
1656 # re-add
1668 for f, args, msg in actions.get('a', []):
1657 for f, args, msg in actions.get('a', []):
1669 repo.dirstate.add(f)
1658 repo.dirstate.add(f)
1670
1659
1671 # re-add/mark as modified
1660 # re-add/mark as modified
1672 for f, args, msg in actions.get('am', []):
1661 for f, args, msg in actions.get('am', []):
1673 if branchmerge:
1662 if branchmerge:
1674 repo.dirstate.normallookup(f)
1663 repo.dirstate.normallookup(f)
1675 else:
1664 else:
1676 repo.dirstate.add(f)
1665 repo.dirstate.add(f)
1677
1666
1678 # exec change
1667 # exec change
1679 for f, args, msg in actions.get('e', []):
1668 for f, args, msg in actions.get('e', []):
1680 repo.dirstate.normallookup(f)
1669 repo.dirstate.normallookup(f)
1681
1670
1682 # keep
1671 # keep
1683 for f, args, msg in actions.get('k', []):
1672 for f, args, msg in actions.get('k', []):
1684 pass
1673 pass
1685
1674
1686 # get
1675 # get
1687 for f, args, msg in actions.get('g', []):
1676 for f, args, msg in actions.get('g', []):
1688 if branchmerge:
1677 if branchmerge:
1689 repo.dirstate.otherparent(f)
1678 repo.dirstate.otherparent(f)
1690 else:
1679 else:
1691 repo.dirstate.normal(f)
1680 repo.dirstate.normal(f)
1692
1681
1693 # merge
1682 # merge
1694 for f, args, msg in actions.get('m', []):
1683 for f, args, msg in actions.get('m', []):
1695 f1, f2, fa, move, anc = args
1684 f1, f2, fa, move, anc = args
1696 if branchmerge:
1685 if branchmerge:
1697 # We've done a branch merge, mark this file as merged
1686 # We've done a branch merge, mark this file as merged
1698 # so that we properly record the merger later
1687 # so that we properly record the merger later
1699 repo.dirstate.merge(f)
1688 repo.dirstate.merge(f)
1700 if f1 != f2: # copy/rename
1689 if f1 != f2: # copy/rename
1701 if move:
1690 if move:
1702 repo.dirstate.remove(f1)
1691 repo.dirstate.remove(f1)
1703 if f1 != f:
1692 if f1 != f:
1704 repo.dirstate.copy(f1, f)
1693 repo.dirstate.copy(f1, f)
1705 else:
1694 else:
1706 repo.dirstate.copy(f2, f)
1695 repo.dirstate.copy(f2, f)
1707 else:
1696 else:
1708 # We've update-merged a locally modified file, so
1697 # We've update-merged a locally modified file, so
1709 # we set the dirstate to emulate a normal checkout
1698 # we set the dirstate to emulate a normal checkout
1710 # of that file some time in the past. Thus our
1699 # of that file some time in the past. Thus our
1711 # merge will appear as a normal local file
1700 # merge will appear as a normal local file
1712 # modification.
1701 # modification.
1713 if f2 == f: # file not locally copied/moved
1702 if f2 == f: # file not locally copied/moved
1714 repo.dirstate.normallookup(f)
1703 repo.dirstate.normallookup(f)
1715 if move:
1704 if move:
1716 repo.dirstate.drop(f1)
1705 repo.dirstate.drop(f1)
1717
1706
1718 # directory rename, move local
1707 # directory rename, move local
1719 for f, args, msg in actions.get('dm', []):
1708 for f, args, msg in actions.get('dm', []):
1720 f0, flag = args
1709 f0, flag = args
1721 if branchmerge:
1710 if branchmerge:
1722 repo.dirstate.add(f)
1711 repo.dirstate.add(f)
1723 repo.dirstate.remove(f0)
1712 repo.dirstate.remove(f0)
1724 repo.dirstate.copy(f0, f)
1713 repo.dirstate.copy(f0, f)
1725 else:
1714 else:
1726 repo.dirstate.normal(f)
1715 repo.dirstate.normal(f)
1727 repo.dirstate.drop(f0)
1716 repo.dirstate.drop(f0)
1728
1717
1729 # directory rename, get
1718 # directory rename, get
1730 for f, args, msg in actions.get('dg', []):
1719 for f, args, msg in actions.get('dg', []):
1731 f0, flag = args
1720 f0, flag = args
1732 if branchmerge:
1721 if branchmerge:
1733 repo.dirstate.add(f)
1722 repo.dirstate.add(f)
1734 repo.dirstate.copy(f0, f)
1723 repo.dirstate.copy(f0, f)
1735 else:
1724 else:
1736 repo.dirstate.normal(f)
1725 repo.dirstate.normal(f)
1737
1726
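# A minimal illustrative sketch (not part of this changeset) of the ``actions``
# mapping that recordupdates() above consumes. The action codes and tuple
# shapes mirror what the calculate/apply phases hand to this function; the
# file names, flags and messages below are hypothetical examples only.
#
#   actions = {
#       'r': [('removed.txt', None, "other deleted")],           # remove
#       'g': [('added.txt', ('', False), "remote created")],     # get
#       'm': [('both.txt',
#              ('both.txt', 'both.txt', 'both.txt', False, anc),
#              "versions differ")],                              # merge
#   }
#   recordupdates(repo, actions, branchmerge=True)
#
# With branchmerge=True the 'r' entry calls dirstate.remove(), the 'g' entry
# calls dirstate.otherparent(), and the 'm' entry calls dirstate.merge(),
# exactly as in the loops above.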
1738 def update(repo, node, branchmerge, force, ancestor=None,
1727 def update(repo, node, branchmerge, force, ancestor=None,
1739 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1728 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1740 updatecheck=None, wc=None):
1729 updatecheck=None, wc=None):
1741 """
1730 """
1742 Perform a merge between the working directory and the given node
1731 Perform a merge between the working directory and the given node
1743
1732
1744 node = the node to update to
1733 node = the node to update to
1745 branchmerge = whether to merge between branches
1734 branchmerge = whether to merge between branches
1746 force = whether to force branch merging or file overwriting
1735 force = whether to force branch merging or file overwriting
1747 matcher = a matcher to filter file lists (dirstate not updated)
1736 matcher = a matcher to filter file lists (dirstate not updated)
1748 mergeancestor = whether it is merging with an ancestor. If true,
1737 mergeancestor = whether it is merging with an ancestor. If true,
1749 we should accept the incoming changes for any prompts that occur.
1738 we should accept the incoming changes for any prompts that occur.
1750 If false, merging with an ancestor (fast-forward) is only allowed
1739 If false, merging with an ancestor (fast-forward) is only allowed
1751 between different named branches. This flag is used by the rebase extension
1740 between different named branches. This flag is used by the rebase extension
1752 as a temporary fix and should be avoided in general.
1741 as a temporary fix and should be avoided in general.
1753 labels = labels to use for base, local and other
1742 labels = labels to use for base, local and other
1754 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1743 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1755 this is True, then 'force' should be True as well.
1744 this is True, then 'force' should be True as well.
1756
1745
1757 The table below shows all the behaviors of the update command given the
1746 The table below shows all the behaviors of the update command given the
1758 -c/--check and -C/--clean or no options, whether the working directory is
1747 -c/--check and -C/--clean or no options, whether the working directory is
1759 dirty, whether a revision is specified, and the relationship of the parent
1748 dirty, whether a revision is specified, and the relationship of the parent
1760 rev to the target rev (linear or not). Match from top first. The -n
1749 rev to the target rev (linear or not). Match from top first. The -n
1761 option doesn't exist on the command line, but represents the
1750 option doesn't exist on the command line, but represents the
1762 experimental.updatecheck=noconflict option.
1751 experimental.updatecheck=noconflict option.
1763
1752
1764 This logic is tested by test-update-branches.t.
1753 This logic is tested by test-update-branches.t.
1765
1754
1766 -c -C -n -m dirty rev linear | result
1755 -c -C -n -m dirty rev linear | result
1767 y y * * * * * | (1)
1756 y y * * * * * | (1)
1768 y * y * * * * | (1)
1757 y * y * * * * | (1)
1769 y * * y * * * | (1)
1758 y * * y * * * | (1)
1770 * y y * * * * | (1)
1759 * y y * * * * | (1)
1771 * y * y * * * | (1)
1760 * y * y * * * | (1)
1772 * * y y * * * | (1)
1761 * * y y * * * | (1)
1773 * * * * * n n | x
1762 * * * * * n n | x
1774 * * * * n * * | ok
1763 * * * * n * * | ok
1775 n n n n y * y | merge
1764 n n n n y * y | merge
1776 n n n n y y n | (2)
1765 n n n n y y n | (2)
1777 n n n y y * * | merge
1766 n n n y y * * | merge
1778 n n y n y * * | merge if no conflict
1767 n n y n y * * | merge if no conflict
1779 n y n n y * * | discard
1768 n y n n y * * | discard
1780 y n n n y * * | (3)
1769 y n n n y * * | (3)
1781
1770
1782 x = can't happen
1771 x = can't happen
1783 * = don't-care
1772 * = don't-care
1784 1 = incompatible options (checked in commands.py)
1773 1 = incompatible options (checked in commands.py)
1785 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1774 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1786 3 = abort: uncommitted changes (checked in commands.py)
1775 3 = abort: uncommitted changes (checked in commands.py)
1787
1776
1788 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1777 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1789 to repo[None] if None is passed.
1778 to repo[None] if None is passed.
1790
1779
1791 Return the same tuple as applyupdates().
1780 Return the same tuple as applyupdates().
1792 """
1781 """
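# Hedged usage sketch (illustration only, not executed here): a plain
# "hg update REV" roughly corresponds to
#
#     stats = update(repo, repo['REV'].node(), branchmerge=False, force=False)
#
# while "hg merge REV" passes branchmerge=True, force=False, and
# "hg update --clean REV" passes branchmerge=False, force=True. The return
# value is the (updated, merged, removed, unresolved) counts tuple described
# above; the exact arguments used by each command live in their respective
# callers, so treat this mapping as an approximation.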
1793 # Avoid cycle.
1782 # Avoid cycle.
1794 from . import sparse
1783 from . import sparse
1795
1784
1796 # This function used to find the default destination if node was None, but
1785 # This function used to find the default destination if node was None, but
1797 # that's now in destutil.py.
1786 # that's now in destutil.py.
1798 assert node is not None
1787 assert node is not None
1799 if not branchmerge and not force:
1788 if not branchmerge and not force:
1800 # TODO: remove the default once all callers that pass branchmerge=False
1789 # TODO: remove the default once all callers that pass branchmerge=False
1801 # and force=False pass a value for updatecheck. We may want to allow
1790 # and force=False pass a value for updatecheck. We may want to allow
1802 # updatecheck='abort' to better support some of these callers.
1791 # updatecheck='abort' to better support some of these callers.
1803 if updatecheck is None:
1792 if updatecheck is None:
1804 updatecheck = 'linear'
1793 updatecheck = 'linear'
1805 assert updatecheck in ('none', 'linear', 'noconflict')
1794 assert updatecheck in ('none', 'linear', 'noconflict')
1806 # If we're doing a partial update, we need to skip updating
1795 # If we're doing a partial update, we need to skip updating
1807 # the dirstate, so make a note of any partial-ness to the
1796 # the dirstate, so make a note of any partial-ness to the
1808 # update here.
1797 # update here.
1809 if matcher is None or matcher.always():
1798 if matcher is None or matcher.always():
1810 partial = False
1799 partial = False
1811 else:
1800 else:
1812 partial = True
1801 partial = True
1813 with repo.wlock():
1802 with repo.wlock():
1814 if wc is None:
1803 if wc is None:
1815 wc = repo[None]
1804 wc = repo[None]
1816 pl = wc.parents()
1805 pl = wc.parents()
1817 p1 = pl[0]
1806 p1 = pl[0]
1818 pas = [None]
1807 pas = [None]
1819 if ancestor is not None:
1808 if ancestor is not None:
1820 pas = [repo[ancestor]]
1809 pas = [repo[ancestor]]
1821
1810
1822 overwrite = force and not branchmerge
1811 overwrite = force and not branchmerge
1823
1812
1824 p2 = repo[node]
1813 p2 = repo[node]
1825 if pas[0] is None:
1814 if pas[0] is None:
1826 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1815 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1827 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1816 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1828 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1817 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1829 else:
1818 else:
1830 pas = [p1.ancestor(p2, warn=branchmerge)]
1819 pas = [p1.ancestor(p2, warn=branchmerge)]
1831
1820
1832 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1821 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1833
1822
1834 ### check phase
1823 ### check phase
1835 if not overwrite:
1824 if not overwrite:
1836 if len(pl) > 1:
1825 if len(pl) > 1:
1837 raise error.Abort(_("outstanding uncommitted merge"))
1826 raise error.Abort(_("outstanding uncommitted merge"))
1838 ms = mergestate.read(repo)
1827 ms = mergestate.read(repo)
1839 if list(ms.unresolved()):
1828 if list(ms.unresolved()):
1840 raise error.Abort(_("outstanding merge conflicts"))
1829 raise error.Abort(_("outstanding merge conflicts"))
1841 if branchmerge:
1830 if branchmerge:
1842 if pas == [p2]:
1831 if pas == [p2]:
1843 raise error.Abort(_("merging with a working directory ancestor"
1832 raise error.Abort(_("merging with a working directory ancestor"
1844 " has no effect"))
1833 " has no effect"))
1845 elif pas == [p1]:
1834 elif pas == [p1]:
1846 if not mergeancestor and wc.branch() == p2.branch():
1835 if not mergeancestor and wc.branch() == p2.branch():
1847 raise error.Abort(_("nothing to merge"),
1836 raise error.Abort(_("nothing to merge"),
1848 hint=_("use 'hg update' "
1837 hint=_("use 'hg update' "
1849 "or check 'hg heads'"))
1838 "or check 'hg heads'"))
1850 if not force and (wc.files() or wc.deleted()):
1839 if not force and (wc.files() or wc.deleted()):
1851 raise error.Abort(_("uncommitted changes"),
1840 raise error.Abort(_("uncommitted changes"),
1852 hint=_("use 'hg status' to list changes"))
1841 hint=_("use 'hg status' to list changes"))
1853 for s in sorted(wc.substate):
1842 for s in sorted(wc.substate):
1854 wc.sub(s).bailifchanged()
1843 wc.sub(s).bailifchanged()
1855
1844
1856 elif not overwrite:
1845 elif not overwrite:
1857 if p1 == p2: # no-op update
1846 if p1 == p2: # no-op update
1858 # call the hooks and exit early
1847 # call the hooks and exit early
1859 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1848 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1860 repo.hook('update', parent1=xp2, parent2='', error=0)
1849 repo.hook('update', parent1=xp2, parent2='', error=0)
1861 return 0, 0, 0, 0
1850 return 0, 0, 0, 0
1862
1851
1863 if (updatecheck == 'linear' and
1852 if (updatecheck == 'linear' and
1864 pas not in ([p1], [p2])): # nonlinear
1853 pas not in ([p1], [p2])): # nonlinear
1865 dirty = wc.dirty(missing=True)
1854 dirty = wc.dirty(missing=True)
1866 if dirty:
1855 if dirty:
1867 # The branching is a bit strange here, to ensure we make the minimal
1856 # The branching is a bit strange here, to ensure we make the minimal
1868 # number of calls to obsutil.foreground.
1857 # number of calls to obsutil.foreground.
1869 foreground = obsutil.foreground(repo, [p1.node()])
1858 foreground = obsutil.foreground(repo, [p1.node()])
1870 # note: the <node> variable contains a random identifier
1859 # note: the <node> variable contains a random identifier
1871 if repo[node].node() in foreground:
1860 if repo[node].node() in foreground:
1872 pass # allow updating to successors
1861 pass # allow updating to successors
1873 else:
1862 else:
1874 msg = _("uncommitted changes")
1863 msg = _("uncommitted changes")
1875 hint = _("commit or update --clean to discard changes")
1864 hint = _("commit or update --clean to discard changes")
1876 raise error.UpdateAbort(msg, hint=hint)
1865 raise error.UpdateAbort(msg, hint=hint)
1877 else:
1866 else:
1878 # Allow jumping branches if clean and specific rev given
1867 # Allow jumping branches if clean and specific rev given
1879 pass
1868 pass
1880
1869
1881 if overwrite:
1870 if overwrite:
1882 pas = [wc]
1871 pas = [wc]
1883 elif not branchmerge:
1872 elif not branchmerge:
1884 pas = [p1]
1873 pas = [p1]
1885
1874
1886 # deprecated config: merge.followcopies
1875 # deprecated config: merge.followcopies
1887 followcopies = repo.ui.configbool('merge', 'followcopies')
1876 followcopies = repo.ui.configbool('merge', 'followcopies')
1888 if overwrite:
1877 if overwrite:
1889 followcopies = False
1878 followcopies = False
1890 elif not pas[0]:
1879 elif not pas[0]:
1891 followcopies = False
1880 followcopies = False
1892 if not branchmerge and not wc.dirty(missing=True):
1881 if not branchmerge and not wc.dirty(missing=True):
1893 followcopies = False
1882 followcopies = False
1894
1883
1895 ### calculate phase
1884 ### calculate phase
1896 actionbyfile, diverge, renamedelete = calculateupdates(
1885 actionbyfile, diverge, renamedelete = calculateupdates(
1897 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1886 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1898 followcopies, matcher=matcher, mergeforce=mergeforce)
1887 followcopies, matcher=matcher, mergeforce=mergeforce)
1899
1888
1900 if updatecheck == 'noconflict':
1889 if updatecheck == 'noconflict':
1901 for f, (m, args, msg) in actionbyfile.iteritems():
1890 for f, (m, args, msg) in actionbyfile.iteritems():
1902 if m not in ('g', 'k', 'e', 'r', 'pr'):
1891 if m not in ('g', 'k', 'e', 'r', 'pr'):
1903 msg = _("conflicting changes")
1892 msg = _("conflicting changes")
1904 hint = _("commit or update --clean to discard changes")
1893 hint = _("commit or update --clean to discard changes")
1905 raise error.Abort(msg, hint=hint)
1894 raise error.Abort(msg, hint=hint)
1906
1895
1907 # Prompt and create actions. Most of this is in the resolve phase
1896 # Prompt and create actions. Most of this is in the resolve phase
1908 # already, but we can't handle .hgsubstate in filemerge or
1897 # already, but we can't handle .hgsubstate in filemerge or
1909 # subrepo.submerge yet so we have to keep prompting for it.
1898 # subrepo.submerge yet so we have to keep prompting for it.
1910 if '.hgsubstate' in actionbyfile:
1899 if '.hgsubstate' in actionbyfile:
1911 f = '.hgsubstate'
1900 f = '.hgsubstate'
1912 m, args, msg = actionbyfile[f]
1901 m, args, msg = actionbyfile[f]
1913 prompts = filemerge.partextras(labels)
1902 prompts = filemerge.partextras(labels)
1914 prompts['f'] = f
1903 prompts['f'] = f
1915 if m == 'cd':
1904 if m == 'cd':
1916 if repo.ui.promptchoice(
1905 if repo.ui.promptchoice(
1917 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1906 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1918 "use (c)hanged version or (d)elete?"
1907 "use (c)hanged version or (d)elete?"
1919 "$$ &Changed $$ &Delete") % prompts, 0):
1908 "$$ &Changed $$ &Delete") % prompts, 0):
1920 actionbyfile[f] = ('r', None, "prompt delete")
1909 actionbyfile[f] = ('r', None, "prompt delete")
1921 elif f in p1:
1910 elif f in p1:
1922 actionbyfile[f] = ('am', None, "prompt keep")
1911 actionbyfile[f] = ('am', None, "prompt keep")
1923 else:
1912 else:
1924 actionbyfile[f] = ('a', None, "prompt keep")
1913 actionbyfile[f] = ('a', None, "prompt keep")
1925 elif m == 'dc':
1914 elif m == 'dc':
1926 f1, f2, fa, move, anc = args
1915 f1, f2, fa, move, anc = args
1927 flags = p2[f2].flags()
1916 flags = p2[f2].flags()
1928 if repo.ui.promptchoice(
1917 if repo.ui.promptchoice(
1929 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1918 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1930 "use (c)hanged version or leave (d)eleted?"
1919 "use (c)hanged version or leave (d)eleted?"
1931 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1920 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1932 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1921 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1933 else:
1922 else:
1934 del actionbyfile[f]
1923 del actionbyfile[f]
1935
1924
1936 # Convert to dictionary-of-lists format
1925 # Convert to dictionary-of-lists format
1937 actions = dict((m, [])
1926 actions = dict((m, [])
1938 for m in 'a am f g cd dc r dm dg m e k p pr'.split())
1927 for m in 'a am f g cd dc r dm dg m e k p pr'.split())
1939 for f, (m, args, msg) in actionbyfile.iteritems():
1928 for f, (m, args, msg) in actionbyfile.iteritems():
1940 if m not in actions:
1929 if m not in actions:
1941 actions[m] = []
1930 actions[m] = []
1942 actions[m].append((f, args, msg))
1931 actions[m].append((f, args, msg))
1943
1932
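# Illustrative note (hypothetical values): if actionbyfile were
# {'foo': ('g', ('', False), "remote created")}, the loop above would yield
# actions['g'] == [('foo', ('', False), "remote created")], i.e. the same
# (file, args, msg) data regrouped per action type instead of per file.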
1944 if not util.fscasesensitive(repo.path):
1933 if not util.fscasesensitive(repo.path):
1945 # check collision between files only in p2 for clean update
1934 # check collision between files only in p2 for clean update
1946 if (not branchmerge and
1935 if (not branchmerge and
1947 (force or not wc.dirty(missing=True, branch=False))):
1936 (force or not wc.dirty(missing=True, branch=False))):
1948 _checkcollision(repo, p2.manifest(), None)
1937 _checkcollision(repo, p2.manifest(), None)
1949 else:
1938 else:
1950 _checkcollision(repo, wc.manifest(), actions)
1939 _checkcollision(repo, wc.manifest(), actions)
1951
1940
1952 # divergent renames
1941 # divergent renames
1953 for f, fl in sorted(diverge.iteritems()):
1942 for f, fl in sorted(diverge.iteritems()):
1954 repo.ui.warn(_("note: possible conflict - %s was renamed "
1943 repo.ui.warn(_("note: possible conflict - %s was renamed "
1955 "multiple times to:\n") % f)
1944 "multiple times to:\n") % f)
1956 for nf in fl:
1945 for nf in fl:
1957 repo.ui.warn(" %s\n" % nf)
1946 repo.ui.warn(" %s\n" % nf)
1958
1947
1959 # rename and delete
1948 # rename and delete
1960 for f, fl in sorted(renamedelete.iteritems()):
1949 for f, fl in sorted(renamedelete.iteritems()):
1961 repo.ui.warn(_("note: possible conflict - %s was deleted "
1950 repo.ui.warn(_("note: possible conflict - %s was deleted "
1962 "and renamed to:\n") % f)
1951 "and renamed to:\n") % f)
1963 for nf in fl:
1952 for nf in fl:
1964 repo.ui.warn(" %s\n" % nf)
1953 repo.ui.warn(" %s\n" % nf)
1965
1954
1966 ### apply phase
1955 ### apply phase
1967 if not branchmerge: # just jump to the new rev
1956 if not branchmerge: # just jump to the new rev
1968 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1957 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1969 if not partial:
1958 if not partial:
1970 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1959 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1971 # note that we're in the middle of an update
1960 # note that we're in the middle of an update
1972 repo.vfs.write('updatestate', p2.hex())
1961 repo.vfs.write('updatestate', p2.hex())
1973
1962
1974 # Advertise fsmonitor when its presence could be useful.
1963 # Advertise fsmonitor when its presence could be useful.
1975 #
1964 #
1976 # We only advertise when performing an update from an empty working
1965 # We only advertise when performing an update from an empty working
1977 # directory. This typically only occurs during initial clone.
1966 # directory. This typically only occurs during initial clone.
1978 #
1967 #
1979 # We give users a mechanism to disable the warning in case it is
1968 # We give users a mechanism to disable the warning in case it is
1980 # annoying.
1969 # annoying.
1981 #
1970 #
1982 # We only show the warning on Linux and macOS because that's where
1971 # We only show the warning on Linux and macOS because that's where
1983 # fsmonitor is considered stable.
1972 # fsmonitor is considered stable.
1984 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
1973 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
1985 fsmonitorthreshold = repo.ui.configint('fsmonitor',
1974 fsmonitorthreshold = repo.ui.configint('fsmonitor',
1986 'warn_update_file_count')
1975 'warn_update_file_count')
1987 try:
1976 try:
1988 extensions.find('fsmonitor')
1977 extensions.find('fsmonitor')
1989 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
1978 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
1990 # We intentionally don't look at whether fsmonitor has disabled
1979 # We intentionally don't look at whether fsmonitor has disabled
1991 # itself because a) fsmonitor may have already printed a warning
1980 # itself because a) fsmonitor may have already printed a warning
1992 # b) we only care about the config state here.
1981 # b) we only care about the config state here.
1993 except KeyError:
1982 except KeyError:
1994 fsmonitorenabled = False
1983 fsmonitorenabled = False
1995
1984
1996 if (fsmonitorwarning
1985 if (fsmonitorwarning
1997 and not fsmonitorenabled
1986 and not fsmonitorenabled
1998 and p1.node() == nullid
1987 and p1.node() == nullid
1999 and len(actions['g']) >= fsmonitorthreshold
1988 and len(actions['g']) >= fsmonitorthreshold
2000 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
1989 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2001 repo.ui.warn(
1990 repo.ui.warn(
2002 _('(warning: large working directory being used without '
1991 _('(warning: large working directory being used without '
2003 'fsmonitor enabled; enable fsmonitor to improve performance; '
1992 'fsmonitor enabled; enable fsmonitor to improve performance; '
2004 'see "hg help -e fsmonitor")\n'))
1993 'see "hg help -e fsmonitor")\n'))
2005
1994
2006 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1995 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2007 wc.flushall()
2008
1996
2009 if not partial:
1997 if not partial:
2010 with repo.dirstate.parentchange():
1998 with repo.dirstate.parentchange():
2011 repo.setparents(fp1, fp2)
1999 repo.setparents(fp1, fp2)
2012 recordupdates(repo, actions, branchmerge)
2000 recordupdates(repo, actions, branchmerge)
2013 # update completed, clear state
2001 # update completed, clear state
2014 util.unlink(repo.vfs.join('updatestate'))
2002 util.unlink(repo.vfs.join('updatestate'))
2015
2003
2016 if not branchmerge:
2004 if not branchmerge:
2017 repo.dirstate.setbranch(p2.branch())
2005 repo.dirstate.setbranch(p2.branch())
2018
2006
2019 # If we're updating to a location, clean up any stale temporary includes
2007 # If we're updating to a location, clean up any stale temporary includes
2020 # (ex: this happens during hg rebase --abort).
2008 # (ex: this happens during hg rebase --abort).
2021 if not branchmerge:
2009 if not branchmerge:
2022 sparse.prunetemporaryincludes(repo)
2010 sparse.prunetemporaryincludes(repo)
2023
2011
2024 if not partial:
2012 if not partial:
2025 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2013 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2026 return stats
2014 return stats
2027
2015
2028 def graft(repo, ctx, pctx, labels, keepparent=False):
2016 def graft(repo, ctx, pctx, labels, keepparent=False):
2029 """Do a graft-like merge.
2017 """Do a graft-like merge.
2030
2018
2031 This is a merge where the merge ancestor is chosen such that one
2019 This is a merge where the merge ancestor is chosen such that one
2032 or more changesets are grafted onto the current changeset. In
2020 or more changesets are grafted onto the current changeset. In
2033 addition to the merge, this fixes up the dirstate to include only
2021 addition to the merge, this fixes up the dirstate to include only
2034 a single parent (if keepparent is False) and tries to duplicate any
2022 a single parent (if keepparent is False) and tries to duplicate any
2035 renames/copies appropriately.
2023 renames/copies appropriately.
2036
2024
2037 ctx - changeset to rebase
2025 ctx - changeset to rebase
2038 pctx - merge base, usually ctx.p1()
2026 pctx - merge base, usually ctx.p1()
2039 labels - merge labels eg ['local', 'graft']
2027 labels - merge labels eg ['local', 'graft']
2040 keepparent - keep second parent if any
2028 keepparent - keep second parent if any
2041
2029
2042 """
2030 """
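# Illustrative sketch only (the revision name is hypothetical): a typical
# caller such as the graft command would invoke this roughly as
#
#     stats = graft(repo, repo[grafted], repo[grafted].p1(),
#                   ['local', 'graft'])
#
# i.e. with the grafted changeset's first parent as the merge base, so that
# only the changes introduced by that changeset are replayed onto the current
# working directory parent.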
2043 # If we're grafting a descendant onto an ancestor, be sure to pass
2031 # If we're grafting a descendant onto an ancestor, be sure to pass
2044 # mergeancestor=True to update. This does two things: 1) allows the merge if
2032 # mergeancestor=True to update. This does two things: 1) allows the merge if
2045 # the destination is the same as the parent of the ctx (so we can use graft
2033 # the destination is the same as the parent of the ctx (so we can use graft
2046 # to copy commits), and 2) informs update that the incoming changes are
2034 # to copy commits), and 2) informs update that the incoming changes are
2047 # newer than the destination so it doesn't prompt about "remote changed foo
2035 # newer than the destination so it doesn't prompt about "remote changed foo
2048 # which local deleted".
2036 # which local deleted".
2049 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2037 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2050
2038
2051 stats = update(repo, ctx.node(), True, True, pctx.node(),
2039 stats = update(repo, ctx.node(), True, True, pctx.node(),
2052 mergeancestor=mergeancestor, labels=labels)
2040 mergeancestor=mergeancestor, labels=labels)
2053
2041
2054 pother = nullid
2042 pother = nullid
2055 parents = ctx.parents()
2043 parents = ctx.parents()
2056 if keepparent and len(parents) == 2 and pctx in parents:
2044 if keepparent and len(parents) == 2 and pctx in parents:
2057 parents.remove(pctx)
2045 parents.remove(pctx)
2058 pother = parents[0].node()
2046 pother = parents[0].node()
2059
2047
2060 with repo.dirstate.parentchange():
2048 with repo.dirstate.parentchange():
2061 repo.setparents(repo['.'].node(), pother)
2049 repo.setparents(repo['.'].node(), pother)
2062 repo.dirstate.write(repo.currenttransaction())
2050 repo.dirstate.write(repo.currenttransaction())
2063 # fix up dirstate for copies and renames
2051 # fix up dirstate for copies and renames
2064 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2052 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2065 return stats
2053 return stats