merge: check created file dirs for path conflicts only once (issue5716)...
Mark Thomas
r35182:a92b9f8e 4.4.2 stable
@@ -1,2060 +1,2065 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    extensions,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
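    # Illustrative example (not part of the original file): a merge with one
    # conflicted file and labels might serialize to records along the lines of
    #
    #   [('L', '<40-char hex of local node>'),
    #    ('O', '<40-char hex of other node>'),
    #    ('F', 'path/to/file\x00<backup hash>\x00path/to/file\x00...'),
    #    ('l', 'working copy\x00merge rev')]
    #
    # with the exact field layout of 'F' records built by _makerecords below.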
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

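    # Illustrative sketch (not part of the original file): the v2 framing
    # described above can be exercised directly with the module-level struct
    # helpers, e.g.
    #
    #   >>> rec = _pack('>sI3s', 'L', 3, 'abc')
    #   >>> rec
    #   'L\x00\x00\x00\x03abc'
    #   >>> _unpack('>I', rec[1:5])[0]
    #   3
    #
    # i.e. one type byte, a big-endian 4-byte length, then `length` bytes of
    # record data, which is exactly what the parser loop above walks.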
    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == 'd':
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(('D', '\0'.join([filename] + v)))
            elif v[0] in ('pu', 'pr'):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(('P', '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(('C', '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append(('F', '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

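    # Illustrative note (not part of the original file): the backup written by
    # add() above is keyed by the SHA-1 of the file *path*, not its contents,
    # roughly:
    #
    #   backupname = 'merge/' + hex(hashlib.sha1(fcl.path()).digest())
    #
    # so re-adding the same path simply overwrites the previous backup copy.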
    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))

class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, f):
        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                        and repo.dirstate.normalize(p) not in repo.dirstate):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
                    if relf not in repo.dirstate:
                        return f
        return None

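# Illustrative sketch (not part of the original file): for a candidate file
# such as 'a/b/c', util.finddirs yields 'a/b' then 'a', so the checker above
# walks the prefixes shortest-first ('a', then 'a/b'), returning the first
# prefix that exists as an untracked file or symlink, and caching prefixes
# that turn out to be safe or absent so later candidates can skip them.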
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

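# Illustrative sketch (not part of the original file): util.normcase folds
# names that differ only in case to a single key, so a provisional manifest
# containing e.g. both 'README' and 'readme' would map to the same entry in
# foldmap and trigger the "case-folding collision" abort above.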
875 def driverpreprocess(repo, ms, wctx, labels=None):
875 def driverpreprocess(repo, ms, wctx, labels=None):
876 """run the preprocess step of the merge driver, if any
876 """run the preprocess step of the merge driver, if any
877
877
878 This is currently not implemented -- it's an extension point."""
878 This is currently not implemented -- it's an extension point."""
879 return True
879 return True
880
880
881 def driverconclude(repo, ms, wctx, labels=None):
881 def driverconclude(repo, ms, wctx, labels=None):
882 """run the conclude step of the merge driver, if any
882 """run the conclude step of the merge driver, if any
883
883
884 This is currently not implemented -- it's an extension point."""
884 This is currently not implemented -- it's an extension point."""
885 return True
885 return True
886
886
887 def _filesindirs(repo, manifest, dirs):
887 def _filesindirs(repo, manifest, dirs):
888 """
888 """
889 Generator that yields pairs of all the files in the manifest that are found
889 Generator that yields pairs of all the files in the manifest that are found
890 inside the directories listed in dirs, and which directory they are found
890 inside the directories listed in dirs, and which directory they are found
891 in.
891 in.
892 """
892 """
893 for f in manifest:
893 for f in manifest:
894 for p in util.finddirs(f):
894 for p in util.finddirs(f):
895 if p in dirs:
895 if p in dirs:
896 yield f, p
896 yield f, p
897 break
897 break
898
898
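A small self-contained sketch of the generator above, with an approximation of util.finddirs written out so the example runs without Mercurial (the helper below is an assumption about its behaviour, yielding each parent directory from deepest to shallowest):

# Standalone sketch: yield (file, dir) for manifest files under given dirs.
def finddirs_sketch(path):
    # yields 'a/b' then 'a' for 'a/b/c' -- approximation of util.finddirs
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def files_in_dirs_sketch(manifest, dirs):
    for f in manifest:
        for p in finddirs_sketch(f):
            if p in dirs:
                yield f, p
                break

print(list(files_in_dirs_sketch(['a/b/c', 'x/y'], {'a'})))   # [('a/b/c', 'a')]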
899 def checkpathconflicts(repo, wctx, mctx, actions):
899 def checkpathconflicts(repo, wctx, mctx, actions):
900 """
900 """
901 Check if any actions introduce path conflicts in the repository, updating
901 Check if any actions introduce path conflicts in the repository, updating
902 actions to record or handle the path conflict accordingly.
902 actions to record or handle the path conflict accordingly.
903 """
903 """
904 mf = wctx.manifest()
904 mf = wctx.manifest()
905
905
906 # The set of local files that conflict with a remote directory.
906 # The set of local files that conflict with a remote directory.
907 localconflicts = set()
907 localconflicts = set()
908
908
909 # The set of directories that conflict with a remote file, and so may cause
909 # The set of directories that conflict with a remote file, and so may cause
910 # conflicts if they still contain any files after the merge.
910 # conflicts if they still contain any files after the merge.
911 remoteconflicts = set()
911 remoteconflicts = set()
912
912
913 # The set of directories that appear as both a file and a directory in the
913 # The set of directories that appear as both a file and a directory in the
914 # remote manifest. These indicate an invalid remote manifest, which
914 # remote manifest. These indicate an invalid remote manifest, which
915 # cannot be cleanly updated to.
915 # cannot be cleanly updated to.

916 invalidconflicts = set()
916 invalidconflicts = set()
917
917
918 # The set of directories that contain files that are being created.
919 createdfiledirs = set()
920
918 # The set of files deleted by all the actions.
921 # The set of files deleted by all the actions.
919 deletedfiles = set()
922 deletedfiles = set()
920
923
921 for f, (m, args, msg) in actions.items():
924 for f, (m, args, msg) in actions.items():
922 if m in ('c', 'dc', 'm', 'cm'):
925 if m in ('c', 'dc', 'm', 'cm'):
923 # This action may create a new local file.
926 # This action may create a new local file.
927 createdfiledirs.update(util.finddirs(f))
924 if mf.hasdir(f):
928 if mf.hasdir(f):
925 # The file aliases a local directory. This might be ok if all
929 # The file aliases a local directory. This might be ok if all
926 # the files in the local directory are being deleted. This
930 # the files in the local directory are being deleted. This
927 # will be checked once we know what all the deleted files are.
931 # will be checked once we know what all the deleted files are.
928 remoteconflicts.add(f)
932 remoteconflicts.add(f)
929 for p in util.finddirs(f):
930 if p in mf:
931 if p in mctx:
932 # The file is in a directory which aliases both a local
933 # and a remote file. This is an internal inconsistency
934 # within the remote manifest.
935 invalidconflicts.add(p)
936 else:
937 # The file is in a directory which aliases a local file.
938 # We will need to rename the local file.
939 localconflicts.add(p)
940 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
941 # The file is in a directory which aliases a remote file.
942 # This is an internal inconsistency within the remote
943 # manifest.
944 invalidconflicts.add(p)
945
946 # Track the names of all deleted files.
933 # Track the names of all deleted files.
947 if m == 'r':
934 if m == 'r':
948 deletedfiles.add(f)
935 deletedfiles.add(f)
949 if m == 'm':
936 if m == 'm':
950 f1, f2, fa, move, anc = args
937 f1, f2, fa, move, anc = args
951 if move:
938 if move:
952 deletedfiles.add(f1)
939 deletedfiles.add(f1)
953 if m == 'dm':
940 if m == 'dm':
954 f2, flags = args
941 f2, flags = args
955 deletedfiles.add(f2)
942 deletedfiles.add(f2)
956
943
944 # Check all directories that contain created files for path conflicts.
945 for p in createdfiledirs:
946 if p in mf:
947 if p in mctx:
948 # A file is in a directory which aliases both a local
949 # and a remote file. This is an internal inconsistency
950 # within the remote manifest.
951 invalidconflicts.add(p)
952 else:
953 # A file is in a directory which aliases a local file.
954 # We will need to rename the local file.
955 localconflicts.add(p)
956 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
957 # The file is in a directory which aliases a remote file.
958 # This is an internal inconsistency within the remote
959 # manifest.
960 invalidconflicts.add(p)
961
957 # Rename all local conflicting files that have not been deleted.
962 # Rename all local conflicting files that have not been deleted.
958 for p in localconflicts:
963 for p in localconflicts:
959 if p not in deletedfiles:
964 if p not in deletedfiles:
960 ctxname = str(wctx).rstrip('+')
965 ctxname = str(wctx).rstrip('+')
961 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
966 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
962 actions[pnew] = ('pr', (p,), "local path conflict")
967 actions[pnew] = ('pr', (p,), "local path conflict")
963 actions[p] = ('p', (pnew, 'l'), "path conflict")
968 actions[p] = ('p', (pnew, 'l'), "path conflict")
964
969
965 if remoteconflicts:
970 if remoteconflicts:
966 # Check if all files in the conflicting directories have been removed.
971 # Check if all files in the conflicting directories have been removed.
967 ctxname = str(mctx).rstrip('+')
972 ctxname = str(mctx).rstrip('+')
968 for f, p in _filesindirs(repo, mf, remoteconflicts):
973 for f, p in _filesindirs(repo, mf, remoteconflicts):
969 if f not in deletedfiles:
974 if f not in deletedfiles:
970 m, args, msg = actions[p]
975 m, args, msg = actions[p]
971 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
976 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
972 if m in ('dc', 'm'):
977 if m in ('dc', 'm'):
973 # Action was merge, just update target.
978 # Action was merge, just update target.
974 actions[pnew] = (m, args, msg)
979 actions[pnew] = (m, args, msg)
975 else:
980 else:
976 # Action was create, change to renamed get action.
981 # Action was create, change to renamed get action.
977 fl = args[0]
982 fl = args[0]
978 actions[pnew] = ('dg', (p, fl), "remote path conflict")
983 actions[pnew] = ('dg', (p, fl), "remote path conflict")
979 actions[p] = ('p', (pnew, 'r'), "path conflict")
984 actions[p] = ('p', (pnew, 'r'), "path conflict")
980 remoteconflicts.remove(p)
985 remoteconflicts.remove(p)
981 break
986 break
982
987
983 if invalidconflicts:
988 if invalidconflicts:
984 for p in invalidconflicts:
989 for p in invalidconflicts:
985 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
990 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
986 raise error.Abort(_("destination manifest contains path conflicts"))
991 raise error.Abort(_("destination manifest contains path conflicts"))
987
992
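The diff above is the heart of this change: the parent directories of every file a 'c', 'dc', 'm' or 'cm' action would create are first collected into createdfiledirs, and the directory-versus-local-file check then runs once over that set, instead of walking the directories of each created file inside the first loop. A simplified standalone sketch of that single-pass idea, assuming plain sets in place of the real manifest and actions objects:

# Standalone sketch of "check created file dirs only once".
# local_files stands in for the working-copy manifest (mf);
# created stands in for files the actions would create.
def parent_dirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def local_path_conflicts(local_files, created):
    createdfiledirs = set()
    for f in created:
        createdfiledirs.update(parent_dirs(f))
    # one pass over the collected directories, not one pass per created file
    return {p for p in createdfiledirs if p in local_files}

print(local_path_conflicts({'a', 'x/y'}, {'a/b', 'a/c', 'a/d'}))   # {'a'}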
988 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
993 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
989 acceptremote, followcopies, forcefulldiff=False):
994 acceptremote, followcopies, forcefulldiff=False):
990 """
995 """
991 Merge wctx and p2 with ancestor pa and generate merge action list
996 Merge wctx and p2 with ancestor pa and generate merge action list
992
997
993 branchmerge and force are as passed in to update
998 branchmerge and force are as passed in to update
994 matcher = matcher to filter file lists
999 matcher = matcher to filter file lists
995 acceptremote = accept the incoming changes without prompting
1000 acceptremote = accept the incoming changes without prompting
996 """
1001 """
997 if matcher is not None and matcher.always():
1002 if matcher is not None and matcher.always():
998 matcher = None
1003 matcher = None
999
1004
1000 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1005 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1001
1006
1002 # manifests fetched in order are going to be faster, so prime the caches
1007 # manifests fetched in order are going to be faster, so prime the caches
1003 [x.manifest() for x in
1008 [x.manifest() for x in
1004 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1009 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1005
1010
1006 if followcopies:
1011 if followcopies:
1007 ret = copies.mergecopies(repo, wctx, p2, pa)
1012 ret = copies.mergecopies(repo, wctx, p2, pa)
1008 copy, movewithdir, diverge, renamedelete, dirmove = ret
1013 copy, movewithdir, diverge, renamedelete, dirmove = ret
1009
1014
1010 boolbm = pycompat.bytestr(bool(branchmerge))
1015 boolbm = pycompat.bytestr(bool(branchmerge))
1011 boolf = pycompat.bytestr(bool(force))
1016 boolf = pycompat.bytestr(bool(force))
1012 boolm = pycompat.bytestr(bool(matcher))
1017 boolm = pycompat.bytestr(bool(matcher))
1013 repo.ui.note(_("resolving manifests\n"))
1018 repo.ui.note(_("resolving manifests\n"))
1014 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1019 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1015 % (boolbm, boolf, boolm))
1020 % (boolbm, boolf, boolm))
1016 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1021 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1017
1022
1018 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1023 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1019 copied = set(copy.values())
1024 copied = set(copy.values())
1020 copied.update(movewithdir.values())
1025 copied.update(movewithdir.values())
1021
1026
1022 if '.hgsubstate' in m1:
1027 if '.hgsubstate' in m1:
1023 # check whether sub state is modified
1028 # check whether sub state is modified
1024 if any(wctx.sub(s).dirty() for s in wctx.substate):
1029 if any(wctx.sub(s).dirty() for s in wctx.substate):
1025 m1['.hgsubstate'] = modifiednodeid
1030 m1['.hgsubstate'] = modifiednodeid
1026
1031
1027 # Don't use m2-vs-ma optimization if:
1032 # Don't use m2-vs-ma optimization if:
1028 # - ma is the same as m1 or m2, which we're just going to diff again later
1033 # - ma is the same as m1 or m2, which we're just going to diff again later
1029 # - The caller specifically asks for a full diff, which is useful during bid
1034 # - The caller specifically asks for a full diff, which is useful during bid
1030 # merge.
1035 # merge.
1031 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1036 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1032 # Identify which files are relevant to the merge, so we can limit the
1037 # Identify which files are relevant to the merge, so we can limit the
1033 # total m1-vs-m2 diff to just those files. This has significant
1038 # total m1-vs-m2 diff to just those files. This has significant
1034 # performance benefits in large repositories.
1039 # performance benefits in large repositories.
1035 relevantfiles = set(ma.diff(m2).keys())
1040 relevantfiles = set(ma.diff(m2).keys())
1036
1041
1037 # For copied and moved files, we need to add the source file too.
1042 # For copied and moved files, we need to add the source file too.
1038 for copykey, copyvalue in copy.iteritems():
1043 for copykey, copyvalue in copy.iteritems():
1039 if copyvalue in relevantfiles:
1044 if copyvalue in relevantfiles:
1040 relevantfiles.add(copykey)
1045 relevantfiles.add(copykey)
1041 for movedirkey in movewithdir:
1046 for movedirkey in movewithdir:
1042 relevantfiles.add(movedirkey)
1047 relevantfiles.add(movedirkey)
1043 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1048 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1044 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1049 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1045
1050
1046 diff = m1.diff(m2, match=matcher)
1051 diff = m1.diff(m2, match=matcher)
1047
1052
1048 if matcher is None:
1053 if matcher is None:
1049 matcher = matchmod.always('', '')
1054 matcher = matchmod.always('', '')
1050
1055
1051 actions = {}
1056 actions = {}
1052 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1057 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1053 if n1 and n2: # file exists on both local and remote side
1058 if n1 and n2: # file exists on both local and remote side
1054 if f not in ma:
1059 if f not in ma:
1055 fa = copy.get(f, None)
1060 fa = copy.get(f, None)
1056 if fa is not None:
1061 if fa is not None:
1057 actions[f] = ('m', (f, f, fa, False, pa.node()),
1062 actions[f] = ('m', (f, f, fa, False, pa.node()),
1058 "both renamed from " + fa)
1063 "both renamed from " + fa)
1059 else:
1064 else:
1060 actions[f] = ('m', (f, f, None, False, pa.node()),
1065 actions[f] = ('m', (f, f, None, False, pa.node()),
1061 "both created")
1066 "both created")
1062 else:
1067 else:
1063 a = ma[f]
1068 a = ma[f]
1064 fla = ma.flags(f)
1069 fla = ma.flags(f)
1065 nol = 'l' not in fl1 + fl2 + fla
1070 nol = 'l' not in fl1 + fl2 + fla
1066 if n2 == a and fl2 == fla:
1071 if n2 == a and fl2 == fla:
1067 actions[f] = ('k', (), "remote unchanged")
1072 actions[f] = ('k', (), "remote unchanged")
1068 elif n1 == a and fl1 == fla: # local unchanged - use remote
1073 elif n1 == a and fl1 == fla: # local unchanged - use remote
1069 if n1 == n2: # optimization: keep local content
1074 if n1 == n2: # optimization: keep local content
1070 actions[f] = ('e', (fl2,), "update permissions")
1075 actions[f] = ('e', (fl2,), "update permissions")
1071 else:
1076 else:
1072 actions[f] = ('g', (fl2, False), "remote is newer")
1077 actions[f] = ('g', (fl2, False), "remote is newer")
1073 elif nol and n2 == a: # remote only changed 'x'
1078 elif nol and n2 == a: # remote only changed 'x'
1074 actions[f] = ('e', (fl2,), "update permissions")
1079 actions[f] = ('e', (fl2,), "update permissions")
1075 elif nol and n1 == a: # local only changed 'x'
1080 elif nol and n1 == a: # local only changed 'x'
1076 actions[f] = ('g', (fl1, False), "remote is newer")
1081 actions[f] = ('g', (fl1, False), "remote is newer")
1077 else: # both changed something
1082 else: # both changed something
1078 actions[f] = ('m', (f, f, f, False, pa.node()),
1083 actions[f] = ('m', (f, f, f, False, pa.node()),
1079 "versions differ")
1084 "versions differ")
1080 elif n1: # file exists only on local side
1085 elif n1: # file exists only on local side
1081 if f in copied:
1086 if f in copied:
1082 pass # we'll deal with it on m2 side
1087 pass # we'll deal with it on m2 side
1083 elif f in movewithdir: # directory rename, move local
1088 elif f in movewithdir: # directory rename, move local
1084 f2 = movewithdir[f]
1089 f2 = movewithdir[f]
1085 if f2 in m2:
1090 if f2 in m2:
1086 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1091 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1087 "remote directory rename, both created")
1092 "remote directory rename, both created")
1088 else:
1093 else:
1089 actions[f2] = ('dm', (f, fl1),
1094 actions[f2] = ('dm', (f, fl1),
1090 "remote directory rename - move from " + f)
1095 "remote directory rename - move from " + f)
1091 elif f in copy:
1096 elif f in copy:
1092 f2 = copy[f]
1097 f2 = copy[f]
1093 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1098 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1094 "local copied/moved from " + f2)
1099 "local copied/moved from " + f2)
1095 elif f in ma: # clean, a different, no remote
1100 elif f in ma: # clean, a different, no remote
1096 if n1 != ma[f]:
1101 if n1 != ma[f]:
1097 if acceptremote:
1102 if acceptremote:
1098 actions[f] = ('r', None, "remote delete")
1103 actions[f] = ('r', None, "remote delete")
1099 else:
1104 else:
1100 actions[f] = ('cd', (f, None, f, False, pa.node()),
1105 actions[f] = ('cd', (f, None, f, False, pa.node()),
1101 "prompt changed/deleted")
1106 "prompt changed/deleted")
1102 elif n1 == addednodeid:
1107 elif n1 == addednodeid:
1103 # This extra 'a' is added by working copy manifest to mark
1108 # This extra 'a' is added by working copy manifest to mark
1104 # the file as locally added. We should forget it instead of
1109 # the file as locally added. We should forget it instead of
1105 # deleting it.
1110 # deleting it.
1106 actions[f] = ('f', None, "remote deleted")
1111 actions[f] = ('f', None, "remote deleted")
1107 else:
1112 else:
1108 actions[f] = ('r', None, "other deleted")
1113 actions[f] = ('r', None, "other deleted")
1109 elif n2: # file exists only on remote side
1114 elif n2: # file exists only on remote side
1110 if f in copied:
1115 if f in copied:
1111 pass # we'll deal with it on m1 side
1116 pass # we'll deal with it on m1 side
1112 elif f in movewithdir:
1117 elif f in movewithdir:
1113 f2 = movewithdir[f]
1118 f2 = movewithdir[f]
1114 if f2 in m1:
1119 if f2 in m1:
1115 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1120 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1116 "local directory rename, both created")
1121 "local directory rename, both created")
1117 else:
1122 else:
1118 actions[f2] = ('dg', (f, fl2),
1123 actions[f2] = ('dg', (f, fl2),
1119 "local directory rename - get from " + f)
1124 "local directory rename - get from " + f)
1120 elif f in copy:
1125 elif f in copy:
1121 f2 = copy[f]
1126 f2 = copy[f]
1122 if f2 in m2:
1127 if f2 in m2:
1123 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1128 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1124 "remote copied from " + f2)
1129 "remote copied from " + f2)
1125 else:
1130 else:
1126 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1131 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1127 "remote moved from " + f2)
1132 "remote moved from " + f2)
1128 elif f not in ma:
1133 elif f not in ma:
1129 # local unknown, remote created: the logic is described by the
1134 # local unknown, remote created: the logic is described by the
1130 # following table:
1135 # following table:
1131 #
1136 #
1132 # force branchmerge different | action
1137 # force branchmerge different | action
1133 # n * * | create
1138 # n * * | create
1134 # y n * | create
1139 # y n * | create
1135 # y y n | create
1140 # y y n | create
1136 # y y y | merge
1141 # y y y | merge
1137 #
1142 #
1138 # Checking whether the files are different is expensive, so we
1143 # Checking whether the files are different is expensive, so we
1139 # don't do that when we can avoid it.
1144 # don't do that when we can avoid it.
1140 if not force:
1145 if not force:
1141 actions[f] = ('c', (fl2,), "remote created")
1146 actions[f] = ('c', (fl2,), "remote created")
1142 elif not branchmerge:
1147 elif not branchmerge:
1143 actions[f] = ('c', (fl2,), "remote created")
1148 actions[f] = ('c', (fl2,), "remote created")
1144 else:
1149 else:
1145 actions[f] = ('cm', (fl2, pa.node()),
1150 actions[f] = ('cm', (fl2, pa.node()),
1146 "remote created, get or merge")
1151 "remote created, get or merge")
1147 elif n2 != ma[f]:
1152 elif n2 != ma[f]:
1148 df = None
1153 df = None
1149 for d in dirmove:
1154 for d in dirmove:
1150 if f.startswith(d):
1155 if f.startswith(d):
1151 # new file added in a directory that was moved
1156 # new file added in a directory that was moved
1152 df = dirmove[d] + f[len(d):]
1157 df = dirmove[d] + f[len(d):]
1153 break
1158 break
1154 if df is not None and df in m1:
1159 if df is not None and df in m1:
1155 actions[df] = ('m', (df, f, f, False, pa.node()),
1160 actions[df] = ('m', (df, f, f, False, pa.node()),
1156 "local directory rename - respect move from " + f)
1161 "local directory rename - respect move from " + f)
1157 elif acceptremote:
1162 elif acceptremote:
1158 actions[f] = ('c', (fl2,), "remote recreating")
1163 actions[f] = ('c', (fl2,), "remote recreating")
1159 else:
1164 else:
1160 actions[f] = ('dc', (None, f, f, False, pa.node()),
1165 actions[f] = ('dc', (None, f, f, False, pa.node()),
1161 "prompt deleted/changed")
1166 "prompt deleted/changed")
1162
1167
1163 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1168 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1164 # If we are merging, look for path conflicts.
1169 # If we are merging, look for path conflicts.
1165 checkpathconflicts(repo, wctx, p2, actions)
1170 checkpathconflicts(repo, wctx, p2, actions)
1166
1171
1167 return actions, diverge, renamedelete
1172 return actions, diverge, renamedelete
1168
1173
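manifestmerge returns a mapping from each affected file to an (action, args, message) tuple, as built throughout the function above. The entries below are invented values, shown only to illustrate the structure (the node bytes and flag strings are placeholders, not real output):

# Illustration of the returned structure only -- values are made up.
actions = {
    'a.txt': ('g', ('', False), "remote is newer"),            # get from remote
    'b.txt': ('m', ('b.txt', 'b.txt', 'b.txt', False, b'\0' * 20),
              "versions differ"),                              # 3-way merge
    'c.txt': ('r', None, "other deleted"),                      # remove locally
}
for f, (m, args, msg) in sorted(actions.items()):
    print("%s: %s -> %s" % (f, msg, m))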
1169 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1174 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1170 """Resolves false conflicts where the nodeid changed but the content
1175 """Resolves false conflicts where the nodeid changed but the content
1171 remained the same."""
1176 remained the same."""
1172
1177
1173 for f, (m, args, msg) in actions.items():
1178 for f, (m, args, msg) in actions.items():
1174 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1179 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1175 # local did change but ended up with same content
1180 # local did change but ended up with same content
1176 actions[f] = 'r', None, "prompt same"
1181 actions[f] = 'r', None, "prompt same"
1177 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1182 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1178 # remote did change but ended up with same content
1183 # remote did change but ended up with same content
1179 del actions[f] # don't get = keep local deleted
1184 del actions[f] # don't get = keep local deleted
1180
1185
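A standalone sketch of the "false conflict" idea above: when a change/delete or delete/change prompt would fire but the surviving side's content equals the ancestor's, the prompt is dropped. Plain dicts stand in for the real contexts, and the equality test replaces the cmp() call:

# Standalone sketch: drop change/delete prompts when content is unchanged.
def resolve_trivial_sketch(actions, local, remote, ancestor):
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and local.get(f) == ancestor[f]:
            actions[f] = ('r', None, "prompt same")   # safe to just remove
        elif m == 'dc' and f in ancestor and remote.get(f) == ancestor[f]:
            del actions[f]                             # keep local deletion

acts = {'doc.txt': ('dc', None, "prompt deleted/changed")}
resolve_trivial_sketch(acts, local={}, remote={'doc.txt': 'v1'},
                       ancestor={'doc.txt': 'v1'})
print(acts)   # {} -- the prompt disappears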
1181 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1186 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1182 acceptremote, followcopies, matcher=None,
1187 acceptremote, followcopies, matcher=None,
1183 mergeforce=False):
1188 mergeforce=False):
1184 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1189 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1185 # Avoid cycle.
1190 # Avoid cycle.
1186 from . import sparse
1191 from . import sparse
1187
1192
1188 if len(ancestors) == 1: # default
1193 if len(ancestors) == 1: # default
1189 actions, diverge, renamedelete = manifestmerge(
1194 actions, diverge, renamedelete = manifestmerge(
1190 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1195 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1191 acceptremote, followcopies)
1196 acceptremote, followcopies)
1192 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1197 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1193
1198
1194 else: # only when merge.preferancestor=* - the default
1199 else: # only when merge.preferancestor=* - the default
1195 repo.ui.note(
1200 repo.ui.note(
1196 _("note: merging %s and %s using bids from ancestors %s\n") %
1201 _("note: merging %s and %s using bids from ancestors %s\n") %
1197 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1202 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1198 for anc in ancestors)))
1203 for anc in ancestors)))
1199
1204
1200 # Call for bids
1205 # Call for bids
1201 fbids = {} # mapping filename to bids (action method to list of actions)
1206 fbids = {} # mapping filename to bids (action method to list of actions)
1202 diverge, renamedelete = None, None
1207 diverge, renamedelete = None, None
1203 for ancestor in ancestors:
1208 for ancestor in ancestors:
1204 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1209 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1205 actions, diverge1, renamedelete1 = manifestmerge(
1210 actions, diverge1, renamedelete1 = manifestmerge(
1206 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1211 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1207 acceptremote, followcopies, forcefulldiff=True)
1212 acceptremote, followcopies, forcefulldiff=True)
1208 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1213 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1209
1214
1210 # Track the shortest set of warnings on the theory that bid
1215 # Track the shortest set of warnings on the theory that bid
1211 # merge will correctly incorporate more information
1216 # merge will correctly incorporate more information
1212 if diverge is None or len(diverge1) < len(diverge):
1217 if diverge is None or len(diverge1) < len(diverge):
1213 diverge = diverge1
1218 diverge = diverge1
1214 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1219 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1215 renamedelete = renamedelete1
1220 renamedelete = renamedelete1
1216
1221
1217 for f, a in sorted(actions.iteritems()):
1222 for f, a in sorted(actions.iteritems()):
1218 m, args, msg = a
1223 m, args, msg = a
1219 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1224 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1220 if f in fbids:
1225 if f in fbids:
1221 d = fbids[f]
1226 d = fbids[f]
1222 if m in d:
1227 if m in d:
1223 d[m].append(a)
1228 d[m].append(a)
1224 else:
1229 else:
1225 d[m] = [a]
1230 d[m] = [a]
1226 else:
1231 else:
1227 fbids[f] = {m: [a]}
1232 fbids[f] = {m: [a]}
1228
1233
1229 # Pick the best bid for each file
1234 # Pick the best bid for each file
1230 repo.ui.note(_('\nauction for merging merge bids\n'))
1235 repo.ui.note(_('\nauction for merging merge bids\n'))
1231 actions = {}
1236 actions = {}
1232 dms = [] # filenames that have dm actions
1237 dms = [] # filenames that have dm actions
1233 for f, bids in sorted(fbids.items()):
1238 for f, bids in sorted(fbids.items()):
1234 # bids is a mapping from action method to list of actions
1239 # bids is a mapping from action method to list of actions
1235 # Consensus?
1240 # Consensus?
1236 if len(bids) == 1: # all bids are the same kind of method
1241 if len(bids) == 1: # all bids are the same kind of method
1237 m, l = list(bids.items())[0]
1242 m, l = list(bids.items())[0]
1238 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1243 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1239 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1244 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1240 actions[f] = l[0]
1245 actions[f] = l[0]
1241 if m == 'dm':
1246 if m == 'dm':
1242 dms.append(f)
1247 dms.append(f)
1243 continue
1248 continue
1244 # If keep is an option, just do it.
1249 # If keep is an option, just do it.
1245 if 'k' in bids:
1250 if 'k' in bids:
1246 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1251 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1247 actions[f] = bids['k'][0]
1252 actions[f] = bids['k'][0]
1248 continue
1253 continue
1249 # If there are gets and they all agree [how could they not?], do it.
1254 # If there are gets and they all agree [how could they not?], do it.
1250 if 'g' in bids:
1255 if 'g' in bids:
1251 ga0 = bids['g'][0]
1256 ga0 = bids['g'][0]
1252 if all(a == ga0 for a in bids['g'][1:]):
1257 if all(a == ga0 for a in bids['g'][1:]):
1253 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1258 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1254 actions[f] = ga0
1259 actions[f] = ga0
1255 continue
1260 continue
1256 # TODO: Consider other simple actions such as mode changes
1261 # TODO: Consider other simple actions such as mode changes
1257 # Handle inefficient democrazy.
1262 # Handle inefficient democrazy.
1258 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1263 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1259 for m, l in sorted(bids.items()):
1264 for m, l in sorted(bids.items()):
1260 for _f, args, msg in l:
1265 for _f, args, msg in l:
1261 repo.ui.note(' %s -> %s\n' % (msg, m))
1266 repo.ui.note(' %s -> %s\n' % (msg, m))
1262 # Pick random action. TODO: Instead, prompt user when resolving
1267 # Pick random action. TODO: Instead, prompt user when resolving
1263 m, l = list(bids.items())[0]
1268 m, l = list(bids.items())[0]
1264 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1269 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1265 (f, m))
1270 (f, m))
1266 actions[f] = l[0]
1271 actions[f] = l[0]
1267 if m == 'dm':
1272 if m == 'dm':
1268 dms.append(f)
1273 dms.append(f)
1269 continue
1274 continue
1270 # Work around 'dm' that can cause multiple actions for the same file
1275 # Work around 'dm' that can cause multiple actions for the same file
1271 for f in dms:
1276 for f in dms:
1272 dm, (f0, flags), msg = actions[f]
1277 dm, (f0, flags), msg = actions[f]
1273 assert dm == 'dm', dm
1278 assert dm == 'dm', dm
1274 if f0 in actions and actions[f0][0] == 'r':
1279 if f0 in actions and actions[f0][0] == 'r':
1275 # We have one bid for removing a file and another for moving it.
1280 # We have one bid for removing a file and another for moving it.
1276 # These two could be merged as first move and then delete ...
1281 # These two could be merged as first move and then delete ...
1277 # but instead drop moving and just delete.
1282 # but instead drop moving and just delete.
1278 del actions[f]
1283 del actions[f]
1279 repo.ui.note(_('end of auction\n\n'))
1284 repo.ui.note(_('end of auction\n\n'))
1280
1285
1281 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1286 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1282
1287
1283 if wctx.rev() is None:
1288 if wctx.rev() is None:
1284 fractions = _forgetremoved(wctx, mctx, branchmerge)
1289 fractions = _forgetremoved(wctx, mctx, branchmerge)
1285 actions.update(fractions)
1290 actions.update(fractions)
1286
1291
1287 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1292 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1288 actions)
1293 actions)
1289
1294
1290 return prunedactions, diverge, renamedelete
1295 return prunedactions, diverge, renamedelete
1291
1296
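During bid merge, each ancestor contributes one bid per file and the bids are grouped by action code in fbids, as in the auction above. The standalone example below builds that structure and applies the "consensus" and "keep wins" rules in miniature; it is a simplification under assumed inputs, not a reimplementation of the auction:

# Standalone sketch of the bid auction: group bids per file by action code,
# then prefer consensus, then 'k' (keep), else fall back to an arbitrary bid.
def auction_sketch(bids_per_ancestor):
    fbids = {}
    for actions in bids_per_ancestor:
        for f, a in actions.items():
            fbids.setdefault(f, {}).setdefault(a[0], []).append(a)
    chosen = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                     # consensus on the action code
            (m, l), = bids.items()
            chosen[f] = l[0]
        elif 'k' in bids:                      # keeping is always safe
            chosen[f] = bids['k'][0]
        else:                                  # ambiguous: pick one
            chosen[f] = next(iter(bids.values()))[0]
    return chosen

anc1 = {'f.txt': ('g', ('', False), "remote is newer")}
anc2 = {'f.txt': ('k', (), "remote unchanged")}
print(auction_sketch([anc1, anc2]))   # picks the 'k' bid for f.txt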
1292 def _getcwd():
1297 def _getcwd():
1293 try:
1298 try:
1294 return pycompat.getcwd()
1299 return pycompat.getcwd()
1295 except OSError as err:
1300 except OSError as err:
1296 if err.errno == errno.ENOENT:
1301 if err.errno == errno.ENOENT:
1297 return None
1302 return None
1298 raise
1303 raise
1299
1304
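The helper above swallows only ENOENT (the current directory itself has vanished) and re-raises anything else. The same defensive pattern in isolation, runnable on its own:

# Standalone sketch of the "tolerate a deleted cwd" pattern.
import errno
import os

def getcwd_or_none():
    try:
        return os.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None       # current directory was removed under us
        raise

print(getcwd_or_none() is not None)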
1300 def batchremove(repo, wctx, actions):
1305 def batchremove(repo, wctx, actions):
1301 """apply removes to the working directory
1306 """apply removes to the working directory
1302
1307
1303 yields tuples for progress updates
1308 yields tuples for progress updates
1304 """
1309 """
1305 verbose = repo.ui.verbose
1310 verbose = repo.ui.verbose
1306 cwd = _getcwd()
1311 cwd = _getcwd()
1307 i = 0
1312 i = 0
1308 for f, args, msg in actions:
1313 for f, args, msg in actions:
1309 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1314 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1310 if verbose:
1315 if verbose:
1311 repo.ui.note(_("removing %s\n") % f)
1316 repo.ui.note(_("removing %s\n") % f)
1312 wctx[f].audit()
1317 wctx[f].audit()
1313 try:
1318 try:
1314 wctx[f].remove(ignoremissing=True)
1319 wctx[f].remove(ignoremissing=True)
1315 except OSError as inst:
1320 except OSError as inst:
1316 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1321 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1317 (f, inst.strerror))
1322 (f, inst.strerror))
1318 if i == 100:
1323 if i == 100:
1319 yield i, f
1324 yield i, f
1320 i = 0
1325 i = 0
1321 i += 1
1326 i += 1
1322 if i > 0:
1327 if i > 0:
1323 yield i, f
1328 yield i, f
1324
1329
1325 if cwd and not _getcwd():
1330 if cwd and not _getcwd():
1326 # cwd was removed in the course of removing files; print a helpful
1331 # cwd was removed in the course of removing files; print a helpful
1327 # warning.
1332 # warning.
1328 repo.ui.warn(_("current directory was removed\n"
1333 repo.ui.warn(_("current directory was removed\n"
1329 "(consider changing to repo root: %s)\n") % repo.root)
1334 "(consider changing to repo root: %s)\n") % repo.root)
1330
1335
1331 # It's necessary to flush here in case we're inside a worker fork and will
1336 # It's necessary to flush here in case we're inside a worker fork and will
1332 # quit after this function.
1337 # quit after this function.
1333 wctx.flushall()
1338 wctx.flushall()
1334
1339
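batchremove above (and batchget below) report progress in chunks of roughly 100 files rather than once per file, so the worker machinery is not flooded with updates. The counting pattern on its own, with trivial placeholder work instead of filesystem operations:

# Standalone sketch of the chunked progress pattern used by batchremove.
def batched_work(items, chunk=100):
    i = 0
    for item in items:
        # ... real per-item work would happen here ...
        if i == chunk:
            yield i, item       # report a full chunk
            i = 0
        i += 1
    if i > 0:
        yield i, item           # report the final partial chunk

print(list(batched_work(range(250))))
# [(100, 100), (100, 200), (50, 249)] -- three progress updates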
1335 def batchget(repo, mctx, wctx, actions):
1340 def batchget(repo, mctx, wctx, actions):
1336 """apply gets to the working directory
1341 """apply gets to the working directory
1337
1342
1338 mctx is the context to get from
1343 mctx is the context to get from
1339
1344
1340 yields tuples for progress updates
1345 yields tuples for progress updates
1341 """
1346 """
1342 verbose = repo.ui.verbose
1347 verbose = repo.ui.verbose
1343 fctx = mctx.filectx
1348 fctx = mctx.filectx
1344 ui = repo.ui
1349 ui = repo.ui
1345 i = 0
1350 i = 0
1346 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1351 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1347 for f, (flags, backup), msg in actions:
1352 for f, (flags, backup), msg in actions:
1348 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1353 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1349 if verbose:
1354 if verbose:
1350 repo.ui.note(_("getting %s\n") % f)
1355 repo.ui.note(_("getting %s\n") % f)
1351
1356
1352 if backup:
1357 if backup:
1353 # If a file or directory exists with the same name, back that
1358 # If a file or directory exists with the same name, back that
1354 # up. Otherwise, look to see if there is a file that conflicts
1359 # up. Otherwise, look to see if there is a file that conflicts
1355 # with a directory this file is in, and if so, back that up.
1360 # with a directory this file is in, and if so, back that up.
1356 absf = repo.wjoin(f)
1361 absf = repo.wjoin(f)
1357 if not repo.wvfs.lexists(f):
1362 if not repo.wvfs.lexists(f):
1358 for p in util.finddirs(f):
1363 for p in util.finddirs(f):
1359 if repo.wvfs.isfileorlink(p):
1364 if repo.wvfs.isfileorlink(p):
1360 absf = repo.wjoin(p)
1365 absf = repo.wjoin(p)
1361 break
1366 break
1362 orig = scmutil.origpath(ui, repo, absf)
1367 orig = scmutil.origpath(ui, repo, absf)
1363 if repo.wvfs.lexists(absf):
1368 if repo.wvfs.lexists(absf):
1364 util.rename(absf, orig)
1369 util.rename(absf, orig)
1365 wctx[f].clearunknown()
1370 wctx[f].clearunknown()
1366 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1371 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1367 if i == 100:
1372 if i == 100:
1368 yield i, f
1373 yield i, f
1369 i = 0
1374 i = 0
1370 i += 1
1375 i += 1
1371 if i > 0:
1376 if i > 0:
1372 yield i, f
1377 yield i, f
1373
1378
1374 # It's necessary to flush here in case we're inside a worker fork and will
1379 # It's necessary to flush here in case we're inside a worker fork and will
1375 # quit after this function.
1380 # quit after this function.
1376 wctx.flushall()
1381 wctx.flushall()
1377
1382
1378 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1383 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1379 """apply the merge action list to the working directory
1384 """apply the merge action list to the working directory
1380
1385
1381 wctx is the working copy context
1386 wctx is the working copy context
1382 mctx is the context to be merged into the working copy
1387 mctx is the context to be merged into the working copy
1383
1388
1384 Return a tuple of counts (updated, merged, removed, unresolved) that
1389 Return a tuple of counts (updated, merged, removed, unresolved) that
1385 describes how many files were affected by the update.
1390 describes how many files were affected by the update.
1386 """
1391 """
1387
1392
1388 updated, merged, removed = 0, 0, 0
1393 updated, merged, removed = 0, 0, 0
1389 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1394 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1390 moves = []
1395 moves = []
1391 for m, l in actions.items():
1396 for m, l in actions.items():
1392 l.sort()
1397 l.sort()
1393
1398
1394 # 'cd' and 'dc' actions are treated like other merge conflicts
1399 # 'cd' and 'dc' actions are treated like other merge conflicts
1395 mergeactions = sorted(actions['cd'])
1400 mergeactions = sorted(actions['cd'])
1396 mergeactions.extend(sorted(actions['dc']))
1401 mergeactions.extend(sorted(actions['dc']))
1397 mergeactions.extend(actions['m'])
1402 mergeactions.extend(actions['m'])
1398 for f, args, msg in mergeactions:
1403 for f, args, msg in mergeactions:
1399 f1, f2, fa, move, anc = args
1404 f1, f2, fa, move, anc = args
1400 if f == '.hgsubstate': # merged internally
1405 if f == '.hgsubstate': # merged internally
1401 continue
1406 continue
1402 if f1 is None:
1407 if f1 is None:
1403 fcl = filemerge.absentfilectx(wctx, fa)
1408 fcl = filemerge.absentfilectx(wctx, fa)
1404 else:
1409 else:
1405 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1410 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1406 fcl = wctx[f1]
1411 fcl = wctx[f1]
1407 if f2 is None:
1412 if f2 is None:
1408 fco = filemerge.absentfilectx(mctx, fa)
1413 fco = filemerge.absentfilectx(mctx, fa)
1409 else:
1414 else:
1410 fco = mctx[f2]
1415 fco = mctx[f2]
1411 actx = repo[anc]
1416 actx = repo[anc]
1412 if fa in actx:
1417 if fa in actx:
1413 fca = actx[fa]
1418 fca = actx[fa]
1414 else:
1419 else:
1415 # TODO: move to absentfilectx
1420 # TODO: move to absentfilectx
1416 fca = repo.filectx(f1, fileid=nullrev)
1421 fca = repo.filectx(f1, fileid=nullrev)
1417 ms.add(fcl, fco, fca, f)
1422 ms.add(fcl, fco, fca, f)
1418 if f1 != f and move:
1423 if f1 != f and move:
1419 moves.append(f1)
1424 moves.append(f1)
1420
1425
1421 _updating = _('updating')
1426 _updating = _('updating')
1422 _files = _('files')
1427 _files = _('files')
1423 progress = repo.ui.progress
1428 progress = repo.ui.progress
1424
1429
1425 # remove renamed files after safely stored
1430 # remove renamed files after safely stored
1426 for f in moves:
1431 for f in moves:
1427 if wctx[f].lexists():
1432 if wctx[f].lexists():
1428 repo.ui.debug("removing %s\n" % f)
1433 repo.ui.debug("removing %s\n" % f)
1429 wctx[f].audit()
1434 wctx[f].audit()
1430 wctx[f].remove()
1435 wctx[f].remove()
1431
1436
1432 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1437 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1433 z = 0
1438 z = 0
1434
1439
1435 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1440 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1436 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1441 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1437
1442
1438 # record path conflicts
1443 # record path conflicts
1439 for f, args, msg in actions['p']:
1444 for f, args, msg in actions['p']:
1440 f1, fo = args
1445 f1, fo = args
1441 s = repo.ui.status
1446 s = repo.ui.status
1442 s(_("%s: path conflict - a file or link has the same name as a "
1447 s(_("%s: path conflict - a file or link has the same name as a "
1443 "directory\n") % f)
1448 "directory\n") % f)
1444 if fo == 'l':
1449 if fo == 'l':
1445 s(_("the local file has been renamed to %s\n") % f1)
1450 s(_("the local file has been renamed to %s\n") % f1)
1446 else:
1451 else:
1447 s(_("the remote file has been renamed to %s\n") % f1)
1452 s(_("the remote file has been renamed to %s\n") % f1)
1448 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1453 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1449 ms.addpath(f, f1, fo)
1454 ms.addpath(f, f1, fo)
1450 z += 1
1455 z += 1
1451 progress(_updating, z, item=f, total=numupdates, unit=_files)
1456 progress(_updating, z, item=f, total=numupdates, unit=_files)
1452
1457
1453 # When merging in-memory, we can't support worker processes, so set the
1458 # When merging in-memory, we can't support worker processes, so set the
1454 # per-item cost at 0 in that case.
1459 # per-item cost at 0 in that case.
1455 cost = 0 if wctx.isinmemory() else 0.001
1460 cost = 0 if wctx.isinmemory() else 0.001
1456
1461
1457 # remove in parallel (must come before resolving path conflicts and getting)
1462 # remove in parallel (must come before resolving path conflicts and getting)
1458 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1463 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1459 actions['r'])
1464 actions['r'])
1460 for i, item in prog:
1465 for i, item in prog:
1461 z += i
1466 z += i
1462 progress(_updating, z, item=item, total=numupdates, unit=_files)
1467 progress(_updating, z, item=item, total=numupdates, unit=_files)
1463 removed = len(actions['r'])
1468 removed = len(actions['r'])
1464
1469
1465 # resolve path conflicts (must come before getting)
1470 # resolve path conflicts (must come before getting)
1466 for f, args, msg in actions['pr']:
1471 for f, args, msg in actions['pr']:
1467 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1472 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1468 f0, = args
1473 f0, = args
1469 if wctx[f0].lexists():
1474 if wctx[f0].lexists():
1470 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1475 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1471 wctx[f].audit()
1476 wctx[f].audit()
1472 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1477 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1473 wctx[f0].remove()
1478 wctx[f0].remove()
1474 z += 1
1479 z += 1
1475 progress(_updating, z, item=f, total=numupdates, unit=_files)
1480 progress(_updating, z, item=f, total=numupdates, unit=_files)
1476
1481
1477 # We should flush before forking into worker processes, since those workers
1482 # We should flush before forking into worker processes, since those workers
1478 # flush when they complete, and we don't want to duplicate work.
1483 # flush when they complete, and we don't want to duplicate work.
1479 wctx.flushall()
1484 wctx.flushall()
1480
1485
1481 # get in parallel
1486 # get in parallel
1482 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1487 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1483 actions['g'])
1488 actions['g'])
1484 for i, item in prog:
1489 for i, item in prog:
1485 z += i
1490 z += i
1486 progress(_updating, z, item=item, total=numupdates, unit=_files)
1491 progress(_updating, z, item=item, total=numupdates, unit=_files)
1487 updated = len(actions['g'])
1492 updated = len(actions['g'])
1488
1493
1489 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1494 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1490 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1495 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1491
1496
1492 # forget (manifest only, just log it) (must come first)
1497 # forget (manifest only, just log it) (must come first)
1493 for f, args, msg in actions['f']:
1498 for f, args, msg in actions['f']:
1494 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1499 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1495 z += 1
1500 z += 1
1496 progress(_updating, z, item=f, total=numupdates, unit=_files)
1501 progress(_updating, z, item=f, total=numupdates, unit=_files)
1497
1502
1498 # re-add (manifest only, just log it)
1503 # re-add (manifest only, just log it)
1499 for f, args, msg in actions['a']:
1504 for f, args, msg in actions['a']:
1500 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1505 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1501 z += 1
1506 z += 1
1502 progress(_updating, z, item=f, total=numupdates, unit=_files)
1507 progress(_updating, z, item=f, total=numupdates, unit=_files)
1503
1508
1504 # re-add/mark as modified (manifest only, just log it)
1509 # re-add/mark as modified (manifest only, just log it)
1505 for f, args, msg in actions['am']:
1510 for f, args, msg in actions['am']:
1506 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1511 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1507 z += 1
1512 z += 1
1508 progress(_updating, z, item=f, total=numupdates, unit=_files)
1513 progress(_updating, z, item=f, total=numupdates, unit=_files)
1509
1514
1510 # keep (noop, just log it)
1515 # keep (noop, just log it)
1511 for f, args, msg in actions['k']:
1516 for f, args, msg in actions['k']:
1512 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1517 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1513 # no progress
1518 # no progress
1514
1519
1515 # directory rename, move local
1520 # directory rename, move local
1516 for f, args, msg in actions['dm']:
1521 for f, args, msg in actions['dm']:
1517 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1522 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1518 z += 1
1523 z += 1
1519 progress(_updating, z, item=f, total=numupdates, unit=_files)
1524 progress(_updating, z, item=f, total=numupdates, unit=_files)
1520 f0, flags = args
1525 f0, flags = args
1521 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1526 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1522 wctx[f].audit()
1527 wctx[f].audit()
1523 wctx[f].write(wctx.filectx(f0).data(), flags)
1528 wctx[f].write(wctx.filectx(f0).data(), flags)
1524 wctx[f0].remove()
1529 wctx[f0].remove()
1525 updated += 1
1530 updated += 1
1526
1531
1527 # local directory rename, get
1532 # local directory rename, get
1528 for f, args, msg in actions['dg']:
1533 for f, args, msg in actions['dg']:
1529 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1534 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1530 z += 1
1535 z += 1
1531 progress(_updating, z, item=f, total=numupdates, unit=_files)
1536 progress(_updating, z, item=f, total=numupdates, unit=_files)
1532 f0, flags = args
1537 f0, flags = args
1533 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1538 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1534 wctx[f].write(mctx.filectx(f0).data(), flags)
1539 wctx[f].write(mctx.filectx(f0).data(), flags)
1535 updated += 1
1540 updated += 1
1536
1541
1537 # exec
1542 # exec
1538 for f, args, msg in actions['e']:
1543 for f, args, msg in actions['e']:
1539 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1544 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1540 z += 1
1545 z += 1
1541 progress(_updating, z, item=f, total=numupdates, unit=_files)
1546 progress(_updating, z, item=f, total=numupdates, unit=_files)
1542 flags, = args
1547 flags, = args
1543 wctx[f].audit()
1548 wctx[f].audit()
1544 wctx[f].setflags('l' in flags, 'x' in flags)
1549 wctx[f].setflags('l' in flags, 'x' in flags)
1545 updated += 1
1550 updated += 1
1546
1551
1547 # the ordering is important here -- ms.mergedriver will raise if the merge
1552 # the ordering is important here -- ms.mergedriver will raise if the merge
1548 # driver has changed, and we want to be able to bypass it when overwrite is
1553 # driver has changed, and we want to be able to bypass it when overwrite is
1549 # True
1554 # True
1550 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1555 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1551
1556
1552 if usemergedriver:
1557 if usemergedriver:
1553 ms.commit()
1558 ms.commit()
1554 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1559 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1555 # the driver might leave some files unresolved
1560 # the driver might leave some files unresolved
1556 unresolvedf = set(ms.unresolved())
1561 unresolvedf = set(ms.unresolved())
1557 if not proceed:
1562 if not proceed:
1558 # XXX setting unresolved to at least 1 is a hack to make sure we
1563 # XXX setting unresolved to at least 1 is a hack to make sure we
1559 # error out
1564 # error out
1560 return updated, merged, removed, max(len(unresolvedf), 1)
1565 return updated, merged, removed, max(len(unresolvedf), 1)
1561 newactions = []
1566 newactions = []
1562 for f, args, msg in mergeactions:
1567 for f, args, msg in mergeactions:
1563 if f in unresolvedf:
1568 if f in unresolvedf:
1564 newactions.append((f, args, msg))
1569 newactions.append((f, args, msg))
1565 mergeactions = newactions
1570 mergeactions = newactions
1566
1571
1567 try:
1572 try:
1568 # premerge
1573 # premerge
1569 tocomplete = []
1574 tocomplete = []
1570 for f, args, msg in mergeactions:
1575 for f, args, msg in mergeactions:
1571 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1576 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1572 z += 1
1577 z += 1
1573 progress(_updating, z, item=f, total=numupdates, unit=_files)
1578 progress(_updating, z, item=f, total=numupdates, unit=_files)
1574 if f == '.hgsubstate': # subrepo states need updating
1579 if f == '.hgsubstate': # subrepo states need updating
1575 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1580 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1576 overwrite, labels)
1581 overwrite, labels)
1577 continue
1582 continue
1578 wctx[f].audit()
1583 wctx[f].audit()
1579 complete, r = ms.preresolve(f, wctx)
1584 complete, r = ms.preresolve(f, wctx)
1580 if not complete:
1585 if not complete:
1581 numupdates += 1
1586 numupdates += 1
1582 tocomplete.append((f, args, msg))
1587 tocomplete.append((f, args, msg))
1583
1588
1584 # merge
1589 # merge
1585 for f, args, msg in tocomplete:
1590 for f, args, msg in tocomplete:
1586 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1591 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1587 z += 1
1592 z += 1
1588 progress(_updating, z, item=f, total=numupdates, unit=_files)
1593 progress(_updating, z, item=f, total=numupdates, unit=_files)
1589 ms.resolve(f, wctx)
1594 ms.resolve(f, wctx)
1590
1595
1591 finally:
1596 finally:
1592 ms.commit()
1597 ms.commit()
1593
1598
1594 unresolved = ms.unresolvedcount()
1599 unresolved = ms.unresolvedcount()
1595
1600
1596 if usemergedriver and not unresolved and ms.mdstate() != 's':
1601 if usemergedriver and not unresolved and ms.mdstate() != 's':
1597 if not driverconclude(repo, ms, wctx, labels=labels):
1602 if not driverconclude(repo, ms, wctx, labels=labels):
1598 # XXX setting unresolved to at least 1 is a hack to make sure we
1603 # XXX setting unresolved to at least 1 is a hack to make sure we
1599 # error out
1604 # error out
1600 unresolved = max(unresolved, 1)
1605 unresolved = max(unresolved, 1)
1601
1606
1602 ms.commit()
1607 ms.commit()
1603
1608
1604 msupdated, msmerged, msremoved = ms.counts()
1609 msupdated, msmerged, msremoved = ms.counts()
1605 updated += msupdated
1610 updated += msupdated
1606 merged += msmerged
1611 merged += msmerged
1607 removed += msremoved
1612 removed += msremoved
1608
1613
1609 extraactions = ms.actions()
1614 extraactions = ms.actions()
1610 if extraactions:
1615 if extraactions:
1611 mfiles = set(a[0] for a in actions['m'])
1616 mfiles = set(a[0] for a in actions['m'])
1612 for k, acts in extraactions.iteritems():
1617 for k, acts in extraactions.iteritems():
1613 actions[k].extend(acts)
1618 actions[k].extend(acts)
1614 # Remove these files from actions['m'] as well. This is important
1619 # Remove these files from actions['m'] as well. This is important
1615 # because in recordupdates, files in actions['m'] are processed
1620 # because in recordupdates, files in actions['m'] are processed
1616 # after files in other actions, and the merge driver might add
1621 # after files in other actions, and the merge driver might add
1617 # files to those actions via extraactions above. This can lead to a
1622 # files to those actions via extraactions above. This can lead to a
1618 # file being recorded twice, with poor results. This is especially
1623 # file being recorded twice, with poor results. This is especially
1619 # problematic for actions['r'] (currently only possible with the
1624 # problematic for actions['r'] (currently only possible with the
1620 # merge driver in the initial merge process; interrupted merges
1625 # merge driver in the initial merge process; interrupted merges
1621 # don't go through this flow).
1626 # don't go through this flow).
1622 #
1627 #
1623 # The real fix here is to have indexes by both file and action so
1628 # The real fix here is to have indexes by both file and action so
1624 # that when the action for a file is changed it is automatically
1629 # that when the action for a file is changed it is automatically
1625 # reflected in the other action lists. But that involves a more
1630 # reflected in the other action lists. But that involves a more
1626 # complex data structure, so this will do for now.
1631 # complex data structure, so this will do for now.
1627 #
1632 #
1628 # We don't need to do the same operation for 'dc' and 'cd' because
1633 # We don't need to do the same operation for 'dc' and 'cd' because
1629 # those lists aren't consulted again.
1634 # those lists aren't consulted again.
1630 mfiles.difference_update(a[0] for a in acts)
1635 mfiles.difference_update(a[0] for a in acts)
1631
1636
1632 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1637 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1633
1638
1634 progress(_updating, None, total=numupdates, unit=_files)
1639 progress(_updating, None, total=numupdates, unit=_files)
1635
1640
1636 return updated, merged, removed, unresolved
1641 return updated, merged, removed, unresolved
1637
1642
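applyupdates returns four counters, and a caller typically folds them into the familiar update summary line. The snippet below only formats such a tuple; the numbers are invented for the example:

# Illustration only: formatting the (updated, merged, removed, unresolved)
# counters returned by applyupdates. The values here are made up.
stats = (3, 1, 2, 0)
print("%d files updated, %d files merged, "
      "%d files removed, %d files unresolved" % stats)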
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

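# Illustrative sketch (not part of merge.py): the shape of the ``actions``
# mapping recordupdates() consumes. Keys are the single-letter action codes
# handled above ('r', 'g', 'e', ...); values are lists of
# (filename, args, message) tuples. The concrete entries and the helper name
# below are invented; real values come from calculateupdates()/applyupdates().
def _demo_record_plain_update(repo):
    actions = {
        'r': [('removed.txt', None, 'other deleted')],
        'g': [('new.txt', ('', False), 'remote created')],
        'e': [('script.sh', ('x',), 'update permissions')],
    }
    with repo.wlock(), repo.dirstate.parentchange():
        # branchmerge=False records a plain update: removals are dropped
        # from the dirstate and fetched files are marked normal.
        recordupdates(repo, actions, False)
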
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |   merge if no conflict
     n   y   n   n    y     *     *     |   discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

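# Illustrative sketch (not part of merge.py): two typical ways a caller might
# drive update(), based on the parameters documented above. The repo/node
# values, the helper name, and the label strings are assumptions supplied by
# the caller; real callers live in hg.py and the commands layer.
def _demo_update_calls(repo, node, othernode):
    # plain working-directory update, refusing to create file conflicts
    updated, merged, removed, unresolved = update(
        repo, node, branchmerge=False, force=False, updatecheck='noconflict')
    # branch merge with the current working directory parent
    stats = update(repo, othernode, branchmerge=True, force=False,
                   labels=['working copy', 'merge rev'])
    return unresolved + stats[3]
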
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
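
# Illustrative sketch (not part of merge.py): how a graft-style caller might
# invoke graft() for a single changeset, using the label pair suggested in the
# docstring. ``repo`` and ``rev`` are assumed to come from the caller; error
# handling and the follow-up commit are omitted.
def _demo_graft_one(repo, rev):
    ctx = repo[rev]
    pctx = ctx.p1()  # merge base, as suggested in the docstring
    stats = graft(repo, ctx, pctx, ['local', 'graft'])
    return stats[3]  # number of unresolved files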