merge: add 'isknown=True' to a dirstate.normalize() in _unknowndirschecker...
Matt Harbison
r37105:e4640ec3 default
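
For readers skimming the diff: dirstate.normalize() maps a path to its canonical spelling before it is looked up in the dirstate (this matters mainly on case-insensitive filesystems), and the call changed below now passes isknown=True. The snippet that follows is a minimal, hypothetical sketch of that call pattern, not part of the change itself; it assumes it is run from inside some existing local repository, and 'Dir/File.txt' is a made-up path.

    # Sketch only: mirrors the call site changed in _unknowndirschecker.__call__
    # further down.  Assumes the current directory is a Mercurial repository.
    from mercurial import hg, ui as uimod, util

    repo = hg.repository(uimod.ui.load(), '.')
    relf = util.pconvert(repo.wvfs.reljoin('Dir', 'File.txt'))  # 'Dir/File.txt'
    relf = repo.dirstate.normalize(relf, isknown=True)          # canonical-case form
    if relf not in repo.dirstate:
        print('%s is not tracked' % relf)
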
@@ -1,2086 +1,2086 b''
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepoutil,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off:off + 1]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == 'd':
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(('D', '\0'.join([filename] + v)))
            elif v[0] in ('pu', 'pr'):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(('P', '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(('C', '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append(('F', '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'wb')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))

class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
-                    relf = repo.dirstate.normalize(relf)
+                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
714
714
715 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
715 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
716 """
716 """
717 Considers any actions that care about the presence of conflicting unknown
717 Considers any actions that care about the presence of conflicting unknown
718 files. For some actions, the result is to abort; for others, it is to
718 files. For some actions, the result is to abort; for others, it is to
719 choose a different action.
719 choose a different action.
720 """
720 """
721 fileconflicts = set()
721 fileconflicts = set()
722 pathconflicts = set()
722 pathconflicts = set()
723 warnconflicts = set()
723 warnconflicts = set()
724 abortconflicts = set()
724 abortconflicts = set()
725 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
725 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
726 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
726 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
727 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
727 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
728 if not force:
728 if not force:
729 def collectconflicts(conflicts, config):
729 def collectconflicts(conflicts, config):
730 if config == 'abort':
730 if config == 'abort':
731 abortconflicts.update(conflicts)
731 abortconflicts.update(conflicts)
732 elif config == 'warn':
732 elif config == 'warn':
733 warnconflicts.update(conflicts)
733 warnconflicts.update(conflicts)
734
734
735 checkunknowndirs = _unknowndirschecker()
735 checkunknowndirs = _unknowndirschecker()
736 for f, (m, args, msg) in actions.iteritems():
736 for f, (m, args, msg) in actions.iteritems():
737 if m in ('c', 'dc'):
737 if m in ('c', 'dc'):
738 if _checkunknownfile(repo, wctx, mctx, f):
738 if _checkunknownfile(repo, wctx, mctx, f):
739 fileconflicts.add(f)
739 fileconflicts.add(f)
740 elif pathconfig and f not in wctx:
740 elif pathconfig and f not in wctx:
741 path = checkunknowndirs(repo, wctx, f)
741 path = checkunknowndirs(repo, wctx, f)
742 if path is not None:
742 if path is not None:
743 pathconflicts.add(path)
743 pathconflicts.add(path)
744 elif m == 'dg':
744 elif m == 'dg':
745 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
745 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
746 fileconflicts.add(f)
746 fileconflicts.add(f)
747
747
748 allconflicts = fileconflicts | pathconflicts
748 allconflicts = fileconflicts | pathconflicts
749 ignoredconflicts = set([c for c in allconflicts
749 ignoredconflicts = set([c for c in allconflicts
750 if repo.dirstate._ignore(c)])
750 if repo.dirstate._ignore(c)])
751 unknownconflicts = allconflicts - ignoredconflicts
751 unknownconflicts = allconflicts - ignoredconflicts
752 collectconflicts(ignoredconflicts, ignoredconfig)
752 collectconflicts(ignoredconflicts, ignoredconfig)
753 collectconflicts(unknownconflicts, unknownconfig)
753 collectconflicts(unknownconflicts, unknownconfig)
754 else:
754 else:
755 for f, (m, args, msg) in actions.iteritems():
755 for f, (m, args, msg) in actions.iteritems():
756 if m == 'cm':
756 if m == 'cm':
757 fl2, anc = args
757 fl2, anc = args
758 different = _checkunknownfile(repo, wctx, mctx, f)
758 different = _checkunknownfile(repo, wctx, mctx, f)
759 if repo.dirstate._ignore(f):
759 if repo.dirstate._ignore(f):
760 config = ignoredconfig
760 config = ignoredconfig
761 else:
761 else:
762 config = unknownconfig
762 config = unknownconfig
763
763
764 # The behavior when force is True is described by this table:
764 # The behavior when force is True is described by this table:
765 # config different mergeforce | action backup
765 # config different mergeforce | action backup
766 # * n * | get n
766 # * n * | get n
767 # * y y | merge -
767 # * y y | merge -
768 # abort y n | merge - (1)
768 # abort y n | merge - (1)
769 # warn y n | warn + get y
769 # warn y n | warn + get y
770 # ignore y n | get y
770 # ignore y n | get y
771 #
771 #
772 # (1) this is probably the wrong behavior here -- we should
772 # (1) this is probably the wrong behavior here -- we should
773 # probably abort, but some actions like rebases currently
773 # probably abort, but some actions like rebases currently
774 # don't like an abort happening in the middle of
774 # don't like an abort happening in the middle of
775 # merge.update.
775 # merge.update.
776 if not different:
776 if not different:
777 actions[f] = ('g', (fl2, False), "remote created")
777 actions[f] = ('g', (fl2, False), "remote created")
778 elif mergeforce or config == 'abort':
778 elif mergeforce or config == 'abort':
779 actions[f] = ('m', (f, f, None, False, anc),
779 actions[f] = ('m', (f, f, None, False, anc),
780 "remote differs from untracked local")
780 "remote differs from untracked local")
781 elif config == 'abort':
781 elif config == 'abort':
782 abortconflicts.add(f)
782 abortconflicts.add(f)
783 else:
783 else:
784 if config == 'warn':
784 if config == 'warn':
785 warnconflicts.add(f)
785 warnconflicts.add(f)
786 actions[f] = ('g', (fl2, True), "remote created")
786 actions[f] = ('g', (fl2, True), "remote created")
787
787
788 for f in sorted(abortconflicts):
788 for f in sorted(abortconflicts):
789 warn = repo.ui.warn
789 warn = repo.ui.warn
790 if f in pathconflicts:
790 if f in pathconflicts:
791 if repo.wvfs.isfileorlink(f):
791 if repo.wvfs.isfileorlink(f):
792 warn(_("%s: untracked file conflicts with directory\n") % f)
792 warn(_("%s: untracked file conflicts with directory\n") % f)
793 else:
793 else:
794 warn(_("%s: untracked directory conflicts with file\n") % f)
794 warn(_("%s: untracked directory conflicts with file\n") % f)
795 else:
795 else:
796 warn(_("%s: untracked file differs\n") % f)
796 warn(_("%s: untracked file differs\n") % f)
797 if abortconflicts:
797 if abortconflicts:
798 raise error.Abort(_("untracked files in working directory "
798 raise error.Abort(_("untracked files in working directory "
799 "differ from files in requested revision"))
799 "differ from files in requested revision"))
800
800
801 for f in sorted(warnconflicts):
801 for f in sorted(warnconflicts):
802 if repo.wvfs.isfileorlink(f):
802 if repo.wvfs.isfileorlink(f):
803 repo.ui.warn(_("%s: replacing untracked file\n") % f)
803 repo.ui.warn(_("%s: replacing untracked file\n") % f)
804 else:
804 else:
805 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
805 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
806
806
807 for f, (m, args, msg) in actions.iteritems():
807 for f, (m, args, msg) in actions.iteritems():
808 if m == 'c':
808 if m == 'c':
809 backup = (f in fileconflicts or f in pathconflicts or
809 backup = (f in fileconflicts or f in pathconflicts or
810 any(p in pathconflicts for p in util.finddirs(f)))
810 any(p in pathconflicts for p in util.finddirs(f)))
811 flags, = args
811 flags, = args
812 actions[f] = ('g', (flags, backup), msg)
812 actions[f] = ('g', (flags, backup), msg)
813
813
814 def _forgetremoved(wctx, mctx, branchmerge):
814 def _forgetremoved(wctx, mctx, branchmerge):
815 """
815 """
816 Forget removed files
816 Forget removed files
817
817
818 If we're jumping between revisions (as opposed to merging), and if
818 If we're jumping between revisions (as opposed to merging), and if
819 neither the working directory nor the target rev has the file,
819 neither the working directory nor the target rev has the file,
820 then we need to remove it from the dirstate, to prevent the
820 then we need to remove it from the dirstate, to prevent the
821 dirstate from listing the file when it is no longer in the
821 dirstate from listing the file when it is no longer in the
822 manifest.
822 manifest.
823
823
824 If we're merging, and the other revision has removed a file
824 If we're merging, and the other revision has removed a file
825 that is not present in the working directory, we need to mark it
825 that is not present in the working directory, we need to mark it
826 as removed.
826 as removed.
827 """
827 """
828
828
829 actions = {}
829 actions = {}
830 m = 'f'
830 m = 'f'
831 if branchmerge:
831 if branchmerge:
832 m = 'r'
832 m = 'r'
833 for f in wctx.deleted():
833 for f in wctx.deleted():
834 if f not in mctx:
834 if f not in mctx:
835 actions[f] = m, None, "forget deleted"
835 actions[f] = m, None, "forget deleted"
836
836
837 if not branchmerge:
837 if not branchmerge:
838 for f in wctx.removed():
838 for f in wctx.removed():
839 if f not in mctx:
839 if f not in mctx:
840 actions[f] = 'f', None, "forget removed"
840 actions[f] = 'f', None, "forget removed"
841
841
842 return actions
842 return actions
843
843
def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

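# Illustrative sketch (not part of upstream merge.py): the foldmap check used
# above, with str.lower() standing in for util.normcase() so the snippet has
# no repository dependencies.
def _demofoldmap(files=('README', 'readme', 'docs/a.txt')):
    foldmap = {}
    collisions = []
    for f in files:
        fold = f.lower()                 # util.normcase(f) in the real code
        if fold in foldmap:
            collisions.append((f, foldmap[fold]))
        else:
            foldmap[fold] = f
    return collisions                    # [('readme', 'README')]
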
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in util.finddirs(f):
            if p in dirs:
                yield f, p
                break

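# Illustrative sketch (not part of upstream merge.py): _filesindirs() only
# walks parent directories of each manifest entry, so a plain list of paths is
# enough to see which files fall under the listed directories (the repo
# argument is unused by the generator).
def _demofilesindirs():
    manifest = ['a/b/c.txt', 'a/d.txt', 'e.txt']
    return list(_filesindirs(None, manifest, {'a/b'}))
    # [('a/b/c.txt', 'a/b')]
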
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in ('c', 'dc', 'm', 'cm'):
            # This action may create a new local file.
            createdfiledirs.update(util.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == 'r':
            deletedfiles.add(f)
        if m == 'm':
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == 'dm':
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip('+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = ('pr', (p,), "local path conflict")
            actions[p] = ('p', (pnew, 'l'), "path conflict")

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip('+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in ('dc', 'm'):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = ('dg', (p, fl), "remote path conflict")
                actions[p] = ('p', (pnew, 'r'), "path conflict")
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
        raise error.Abort(_("destination manifest contains path conflicts"))

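# Illustrative sketch (not part of upstream merge.py): the basic local path
# conflict detected above -- the remote adds a file 'a/b' while the working
# copy tracks a plain file 'a'.  A tiny finddirs() stand-in keeps the snippet
# self-contained; the real code uses util.finddirs() and the wctx manifest.
def _demopathconflict():
    def finddirs(path):
        while '/' in path:
            path = path.rsplit('/', 1)[0]
            yield path
    localfiles = {'a'}                                  # working copy manifest
    actions = {'a/b': ('c', ('',), "remote created")}
    localconflicts = set()
    for f, (m, args, msg) in actions.items():
        if m in ('c', 'dc', 'm', 'cm'):                 # may create 'a/b'
            for p in finddirs(f):
                if p in localfiles:
                    localconflicts.add(p)
    # 'a' would be renamed out of the way via 'pr'/'p' actions
    return localconflicts                               # {'a'}
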
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                            "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    return actions, diverge, renamedelete

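# Illustrative sketch (not part of upstream merge.py): the
# force/branchmerge/different table from the "local unknown, remote created"
# branch of manifestmerge() above, written out as a standalone helper.
def _demounknownremotecreated(force, branchmerge, different):
    if not force:
        return 'create'
    if not branchmerge:
        return 'create'
    return 'merge' if different else 'create'
    # _demounknownremotecreated(True, True, True) == 'merge'
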
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
       remained the same."""
    # We force a copy of actions.items() because we're going to mutate
    # actions as we resolve trivial conflicts.
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

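# Illustrative sketch (not part of upstream merge.py): how a 'dc' action is
# dropped when the remote file ended up identical to the ancestor.  The
# fakefilectx class and plain dicts are hypothetical stand-ins; cmp()
# returning False means "same content", matching filectx.cmp() semantics.
def _demoresolvetrivial():
    class fakefilectx(object):
        def __init__(self, data):
            self._data = data
        def cmp(self, other):
            return self._data != other._data
    ancestor = {'a': fakefilectx(b'x')}
    mctx = {'a': fakefilectx(b'x')}          # remote touched 'a', same bytes
    actions = {'a': ('dc', None, "prompt deleted/changed")}
    for f, (m, args, msg) in list(actions.items()):
        if m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            del actions[f]                   # keep the local deletion
    return actions                           # {}
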
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete

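# Illustrative sketch (not part of upstream merge.py): the fbids structure the
# bid-merge auction consumes -- filename -> {action kind: [bids]} -- and the
# first two picking rules (consensus, then a 'keep' bid wins outright).
def _demoauction():
    fbids = {
        'a': {'g': [('g', ('', False), "remote is newer"),
                    ('g', ('', False), "remote is newer")]},   # consensus
        'b': {'k': [('k', (), "remote unchanged")],
              'g': [('g', ('', False), "remote is newer")]},   # keep wins
    }
    picked = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:
            m, l = list(bids.items())[0]
            if all(a == l[0] for a in l[1:]):
                picked[f] = l[0]
                continue
        if 'k' in bids:
            picked[f] = bids['k'][0]
    return picked
    # {'a': ('g', ('', False), 'remote is newer'),
    #  'b': ('k', (), 'remote unchanged')}
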
def _getcwd():
    try:
        return pycompat.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise

def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

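# Illustrative sketch (not part of upstream merge.py): the progress-batching
# pattern shared by batchremove() and batchget() -- yield a count roughly
# every hundred items, plus a final partial batch, so the caller can advance
# one progress bar across worker processes.
def _demobatchprogress(items):
    i = 0
    item = None
    for item in items:
        if i == 100:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item
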
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                absf = repo.wjoin(f)
                if not repo.wvfs.lexists(f):
                    for p in util.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            absf = repo.wjoin(p)
                            break
                orig = scmutil.origpath(ui, repo, absf)
                if repo.wvfs.lexists(absf):
                    util.rename(absf, orig)
            wctx[f].clearunknown()
            atomictemp = ui.configbool("experimental", "update.atomic-file")
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
                          atomictemp=atomictemp)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    oplist = [actions[a] for a in 'g dc dg m'.split()]
    prefetch = scmutil.fileprefetchhooks
    prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])

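# Illustrative sketch (not part of upstream merge.py): flattening the
# per-action lists the way _prefetchfiles() does, to get the filenames handed
# to the prefetch hooks (only 'g', 'dc', 'dg' and 'm' touch the incoming
# context).  The filenames below are hypothetical.
def _demoprefetchlist():
    actions = {
        'g': [('a.txt', ('', False), "remote created")],
        'dc': [],
        'dg': [('b.txt', ('c.txt', ''),
                "local directory rename - get from c.txt")],
        'm': [('d.txt', ('d.txt', 'd.txt', 'd.txt', False, None),
               "versions differ")],
    }
    oplist = [actions[a] for a in 'g dc dg m'.split()]
    return [f for sublist in oplist for f, args, msg in sublist]
    # ['a.txt', 'b.txt', 'd.txt']
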
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
    z = 0

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions['p']:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions['pr']:
        repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
        f0, = args
        if wctx[f0].lexists():
            repo.ui.note(_("moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # get in parallel
    prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError("in-memory merge does not "
                                                    "support mergedriver")
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            if f == '.hgsubstate': # subrepo states need updating
                subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                     overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

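# Illustrative sketch (not part of upstream merge.py): the by-action-type dict
# applyupdates() consumes and the progress total it derives; 'k' (keep)
# entries are excluded because keeps are no-ops.  The filenames below are
# hypothetical.
def _demonumupdates():
    actions = {
        'r': [('old.txt', None, "other deleted")],
        'g': [('new.txt', ('', False), "remote created")],
        'k': [('same.txt', (), "remote unchanged")],
        'm': [], 'cd': [], 'dc': [], 'p': [], 'pr': [],
        'f': [], 'a': [], 'am': [], 'dm': [], 'dg': [], 'e': [],
    }
    return sum(len(l) for m, l in actions.items() if m != 'k')   # 2
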
1662 def recordupdates(repo, actions, branchmerge):
1663     "record merge actions to the dirstate"
1664     # remove (must come first)
1665     for f, args, msg in actions.get('r', []):
1666         if branchmerge:
1667             repo.dirstate.remove(f)
1668         else:
1669             repo.dirstate.drop(f)
1670
1671     # forget (must come first)
1672     for f, args, msg in actions.get('f', []):
1673         repo.dirstate.drop(f)
1674
1675     # resolve path conflicts
1676     for f, args, msg in actions.get('pr', []):
1677         f0, = args
1678         origf0 = repo.dirstate.copied(f0) or f0
1679         repo.dirstate.add(f)
1680         repo.dirstate.copy(origf0, f)
1681         if f0 == origf0:
1682             repo.dirstate.remove(f0)
1683         else:
1684             repo.dirstate.drop(f0)
1685
1686     # re-add
1687     for f, args, msg in actions.get('a', []):
1688         repo.dirstate.add(f)
1689
1690     # re-add/mark as modified
1691     for f, args, msg in actions.get('am', []):
1692         if branchmerge:
1693             repo.dirstate.normallookup(f)
1694         else:
1695             repo.dirstate.add(f)
1696
1697     # exec change
1698     for f, args, msg in actions.get('e', []):
1699         repo.dirstate.normallookup(f)
1700
1701     # keep
1702     for f, args, msg in actions.get('k', []):
1703         pass
1704
1705     # get
1706     for f, args, msg in actions.get('g', []):
1707         if branchmerge:
1708             repo.dirstate.otherparent(f)
1709         else:
1710             repo.dirstate.normal(f)
1711
1712     # merge
1713     for f, args, msg in actions.get('m', []):
1714         f1, f2, fa, move, anc = args
1715         if branchmerge:
1716             # We've done a branch merge, mark this file as merged
1717             # so that we properly record the merger later
1718             repo.dirstate.merge(f)
1719             if f1 != f2: # copy/rename
1720                 if move:
1721                     repo.dirstate.remove(f1)
1722                 if f1 != f:
1723                     repo.dirstate.copy(f1, f)
1724                 else:
1725                     repo.dirstate.copy(f2, f)
1726         else:
1727             # We've update-merged a locally modified file, so
1728             # we set the dirstate to emulate a normal checkout
1729             # of that file some time in the past. Thus our
1730             # merge will appear as a normal local file
1731             # modification.
1732             if f2 == f: # file not locally copied/moved
1733                 repo.dirstate.normallookup(f)
1734             if move:
1735                 repo.dirstate.drop(f1)
1736
1737     # directory rename, move local
1738     for f, args, msg in actions.get('dm', []):
1739         f0, flag = args
1740         if branchmerge:
1741             repo.dirstate.add(f)
1742             repo.dirstate.remove(f0)
1743             repo.dirstate.copy(f0, f)
1744         else:
1745             repo.dirstate.normal(f)
1746             repo.dirstate.drop(f0)
1747
1748     # directory rename, get
1749     for f, args, msg in actions.get('dg', []):
1750         f0, flag = args
1751         if branchmerge:
1752             repo.dirstate.add(f)
1753             repo.dirstate.copy(f0, f)
1754         else:
1755             repo.dirstate.normal(f)
1756
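# Editorial sketch (not part of this changeset): recordupdates() is only
# meaningful under the wlock and inside a dirstate parentchange() context,
# mirroring the call made near the end of update() below. The helper name is
# illustrative only.
def _examplerecord(repo, actions, fp1, fp2, branchmerge):
    with repo.wlock():
        with repo.dirstate.parentchange():
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)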
1757 def update(repo, node, branchmerge, force, ancestor=None,
1758            mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1759            updatecheck=None, wc=None):
1760     """
1761     Perform a merge between the working directory and the given node
1762
1763     node = the node to update to
1764     branchmerge = whether to merge between branches
1765     force = whether to force branch merging or file overwriting
1766     matcher = a matcher to filter file lists (dirstate not updated)
1767     mergeancestor = whether it is merging with an ancestor. If true,
1768       we should accept the incoming changes for any prompts that occur.
1769       If false, merging with an ancestor (fast-forward) is only allowed
1770       between different named branches. This flag is used by the rebase extension
1771       as a temporary fix and should be avoided in general.
1772     labels = labels to use for base, local and other
1773     mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1774       this is True, then 'force' should be True as well.
1775
1776     The table below shows all the behaviors of the update command given the
1777     -c/--check and -C/--clean or no options, whether the working directory is
1778     dirty, whether a revision is specified, and the relationship of the parent
1779     rev to the target rev (linear or not). Match from top first. The -n
1780     option doesn't exist on the command line, but represents the
1781     experimental.updatecheck=noconflict option.
1782
1783     This logic is tested by test-update-branches.t.
1784
1785     -c  -C  -n  -m  dirty  rev  linear  |  result
1786      y   y   *   *    *     *     *    |    (1)
1787      y   *   y   *    *     *     *    |    (1)
1788      y   *   *   y    *     *     *    |    (1)
1789      *   y   y   *    *     *     *    |    (1)
1790      *   y   *   y    *     *     *    |    (1)
1791      *   *   y   y    *     *     *    |    (1)
1792      *   *   *   *    *     n     n    |     x
1793      *   *   *   *    n     *     *    |    ok
1794      n   n   n   n    y     *     y    | merge
1795      n   n   n   n    y     y     n    |    (2)
1796      n   n   n   y    y     *     *    | merge
1797      n   n   y   n    y     *     *    | merge if no conflict
1798      n   y   n   n    y     *     *    | discard
1799      y   n   n   n    y     *     *    |    (3)
1800
1801     x = can't happen
1802     * = don't-care
1803     1 = incompatible options (checked in commands.py)
1804     2 = abort: uncommitted changes (commit or update --clean to discard changes)
1805     3 = abort: uncommitted changes (checked in commands.py)
1806
1807     The merge is performed inside ``wc``, a workingctx-like object. It defaults
1808     to repo[None] if None is passed.
1809
1810     Return the same tuple as applyupdates().
1811     """
1812     # Avoid cycle.
1813     from . import sparse
1814
1815     # This function used to find the default destination if node was None, but
1816     # that's now in destutil.py.
1817     assert node is not None
1818     if not branchmerge and not force:
1819         # TODO: remove the default once all callers that pass branchmerge=False
1820         # and force=False pass a value for updatecheck. We may want to allow
1821         # updatecheck='abort' to better support some of these callers.
1822         if updatecheck is None:
1823             updatecheck = 'linear'
1824         assert updatecheck in ('none', 'linear', 'noconflict')
1825     # If we're doing a partial update, we need to skip updating
1826     # the dirstate, so make a note of any partial-ness to the
1827     # update here.
1828     if matcher is None or matcher.always():
1829         partial = False
1830     else:
1831         partial = True
1832     with repo.wlock():
1833         if wc is None:
1834             wc = repo[None]
1835         pl = wc.parents()
1836         p1 = pl[0]
1837         pas = [None]
1838         if ancestor is not None:
1839             pas = [repo[ancestor]]
1840
1841         overwrite = force and not branchmerge
1842
1843         p2 = repo[node]
1844         if pas[0] is None:
1845             if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1846                 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1847                 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1848             else:
1849                 pas = [p1.ancestor(p2, warn=branchmerge)]
1850
1851         fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1852
1853         ### check phase
1854         if not overwrite:
1855             if len(pl) > 1:
1856                 raise error.Abort(_("outstanding uncommitted merge"))
1857             ms = mergestate.read(repo)
1858             if list(ms.unresolved()):
1859                 raise error.Abort(_("outstanding merge conflicts"))
1860         if branchmerge:
1861             if pas == [p2]:
1862                 raise error.Abort(_("merging with a working directory ancestor"
1863                                     " has no effect"))
1864             elif pas == [p1]:
1865                 if not mergeancestor and wc.branch() == p2.branch():
1866                     raise error.Abort(_("nothing to merge"),
1867                                       hint=_("use 'hg update' "
1868                                              "or check 'hg heads'"))
1869             if not force and (wc.files() or wc.deleted()):
1870                 raise error.Abort(_("uncommitted changes"),
1871                                   hint=_("use 'hg status' to list changes"))
1872             if not wc.isinmemory():
1873                 for s in sorted(wc.substate):
1874                     wc.sub(s).bailifchanged()
1875
1876         elif not overwrite:
1877             if p1 == p2: # no-op update
1878                 # call the hooks and exit early
1879                 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1880                 repo.hook('update', parent1=xp2, parent2='', error=0)
1881                 return 0, 0, 0, 0
1882
1883             if (updatecheck == 'linear' and
1884                     pas not in ([p1], [p2])): # nonlinear
1885                 dirty = wc.dirty(missing=True)
1886                 if dirty:
1887                     # Branching is a bit strange to ensure we do the minimal
1888                     # number of calls to obsutil.foreground.
1889                     foreground = obsutil.foreground(repo, [p1.node()])
1890                     # note: the <node> variable contains a random identifier
1891                     if repo[node].node() in foreground:
1892                         pass # allow updating to successors
1893                     else:
1894                         msg = _("uncommitted changes")
1895                         hint = _("commit or update --clean to discard changes")
1896                         raise error.UpdateAbort(msg, hint=hint)
1897                 else:
1898                     # Allow jumping branches if clean and specific rev given
1899                     pass
1900
1901         if overwrite:
1902             pas = [wc]
1903         elif not branchmerge:
1904             pas = [p1]
1905
1906         # deprecated config: merge.followcopies
1907         followcopies = repo.ui.configbool('merge', 'followcopies')
1908         if overwrite:
1909             followcopies = False
1910         elif not pas[0]:
1911             followcopies = False
1912         if not branchmerge and not wc.dirty(missing=True):
1913             followcopies = False
1914
1915         ### calculate phase
1916         actionbyfile, diverge, renamedelete = calculateupdates(
1917             repo, wc, p2, pas, branchmerge, force, mergeancestor,
1918             followcopies, matcher=matcher, mergeforce=mergeforce)
1919
1920         if updatecheck == 'noconflict':
1921             for f, (m, args, msg) in actionbyfile.iteritems():
1922                 if m not in ('g', 'k', 'e', 'r', 'pr'):
1923                     msg = _("conflicting changes")
1924                     hint = _("commit or update --clean to discard changes")
1925                     raise error.Abort(msg, hint=hint)
1926
1927         # Prompt and create actions. Most of this is in the resolve phase
1928         # already, but we can't handle .hgsubstate in filemerge or
1929         # subrepoutil.submerge yet so we have to keep prompting for it.
1930         if '.hgsubstate' in actionbyfile:
1931             f = '.hgsubstate'
1932             m, args, msg = actionbyfile[f]
1933             prompts = filemerge.partextras(labels)
1934             prompts['f'] = f
1935             if m == 'cd':
1936                 if repo.ui.promptchoice(
1937                     _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1938                       "use (c)hanged version or (d)elete?"
1939                       "$$ &Changed $$ &Delete") % prompts, 0):
1940                     actionbyfile[f] = ('r', None, "prompt delete")
1941                 elif f in p1:
1942                     actionbyfile[f] = ('am', None, "prompt keep")
1943                 else:
1944                     actionbyfile[f] = ('a', None, "prompt keep")
1945             elif m == 'dc':
1946                 f1, f2, fa, move, anc = args
1947                 flags = p2[f2].flags()
1948                 if repo.ui.promptchoice(
1949                     _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1950                       "use (c)hanged version or leave (d)eleted?"
1951                       "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1952                     actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1953                 else:
1954                     del actionbyfile[f]
1955
1956         # Convert to dictionary-of-lists format
1957         actions = dict((m, [])
1958                        for m in 'a am f g cd dc r dm dg m e k p pr'.split())
1959         for f, (m, args, msg) in actionbyfile.iteritems():
1960             if m not in actions:
1961                 actions[m] = []
1962             actions[m].append((f, args, msg))
1963
1964         if not util.fscasesensitive(repo.path):
1965             # check collision between files only in p2 for clean update
1966             if (not branchmerge and
1967                 (force or not wc.dirty(missing=True, branch=False))):
1968                 _checkcollision(repo, p2.manifest(), None)
1969             else:
1970                 _checkcollision(repo, wc.manifest(), actions)
1971
1972         # divergent renames
1973         for f, fl in sorted(diverge.iteritems()):
1974             repo.ui.warn(_("note: possible conflict - %s was renamed "
1975                            "multiple times to:\n") % f)
1976             for nf in fl:
1977                 repo.ui.warn(" %s\n" % nf)
1978
1979         # rename and delete
1980         for f, fl in sorted(renamedelete.iteritems()):
1981             repo.ui.warn(_("note: possible conflict - %s was deleted "
1982                            "and renamed to:\n") % f)
1983             for nf in fl:
1984                 repo.ui.warn(" %s\n" % nf)
1985
1986         ### apply phase
1987         if not branchmerge: # just jump to the new rev
1988             fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1989         if not partial and not wc.isinmemory():
1990             repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1991             # note that we're in the middle of an update
1992             repo.vfs.write('updatestate', p2.hex())
1993
1994         # Advertise fsmonitor when its presence could be useful.
1995         #
1996         # We only advertise when performing an update from an empty working
1997         # directory. This typically only occurs during initial clone.
1998         #
1999         # We give users a mechanism to disable the warning in case it is
2000         # annoying.
2001         #
2002         # We only allow on Linux and MacOS because that's where fsmonitor is
2003         # considered stable.
2004         fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2005         fsmonitorthreshold = repo.ui.configint('fsmonitor',
2006                                                'warn_update_file_count')
2007         try:
2008             # avoid cycle: extensions -> cmdutil -> merge
2009             from . import extensions
2010             extensions.find('fsmonitor')
2011             fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2012             # We intentionally don't look at whether fsmonitor has disabled
2013             # itself because a) fsmonitor may have already printed a warning
2014             # b) we only care about the config state here.
2015         except KeyError:
2016             fsmonitorenabled = False
2017
2018         if (fsmonitorwarning
2019                 and not fsmonitorenabled
2020                 and p1.node() == nullid
2021                 and len(actions['g']) >= fsmonitorthreshold
2022                 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2023             repo.ui.warn(
2024                 _('(warning: large working directory being used without '
2025                   'fsmonitor enabled; enable fsmonitor to improve performance; '
2026                   'see "hg help -e fsmonitor")\n'))
2027
2028         stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2029
2030         if not partial and not wc.isinmemory():
2031             with repo.dirstate.parentchange():
2032                 repo.setparents(fp1, fp2)
2033                 recordupdates(repo, actions, branchmerge)
2034                 # update completed, clear state
2035                 util.unlink(repo.vfs.join('updatestate'))
2036
2037             if not branchmerge:
2038                 repo.dirstate.setbranch(p2.branch())
2039
2040     # If we're updating to a location, clean up any stale temporary includes
2041     # (ex: this happens during hg rebase --abort).
2042     if not branchmerge:
2043         sparse.prunetemporaryincludes(repo)
2044
2045     if not partial:
2046         repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
2047     return stats
2048
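# Editorial sketch (not part of this changeset): a hypothetical caller of
# update() performing a plain, non-merge checkout of 'node' with the stricter
# 'noconflict' check described in the docstring above. The helper name is
# illustrative only; the return value is the same tuple as applyupdates().
def _exampleupdate(repo, node):
    return update(repo, node, branchmerge=False, force=False,
                  updatecheck='noconflict')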
2049 def graft(repo, ctx, pctx, labels, keepparent=False):
2050     """Do a graft-like merge.
2051
2052     This is a merge where the merge ancestor is chosen such that one
2053     or more changesets are grafted onto the current changeset. In
2054     addition to the merge, this fixes up the dirstate to include only
2055     a single parent (if keepparent is False) and tries to duplicate any
2056     renames/copies appropriately.
2057
2058     ctx - changeset to rebase
2059     pctx - merge base, usually ctx.p1()
2060     labels - merge labels, e.g. ['local', 'graft']
2061     keepparent - keep second parent if any
2062
2063     """
2064     # If we're grafting a descendant onto an ancestor, be sure to pass
2065     # mergeancestor=True to update. This does two things: 1) allows the merge if
2066     # the destination is the same as the parent of the ctx (so we can use graft
2067     # to copy commits), and 2) informs update that the incoming changes are
2068     # newer than the destination so it doesn't prompt about "remote changed foo
2069     # which local deleted".
2070     mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2071
2072     stats = update(repo, ctx.node(), True, True, pctx.node(),
2073                    mergeancestor=mergeancestor, labels=labels)
2074
2075     pother = nullid
2076     parents = ctx.parents()
2077     if keepparent and len(parents) == 2 and pctx in parents:
2078         parents.remove(pctx)
2079         pother = parents[0].node()
2080
2081     with repo.dirstate.parentchange():
2082         repo.setparents(repo['.'].node(), pother)
2083         repo.dirstate.write(repo.currenttransaction())
2084         # fix up dirstate for copies and renames
2085         copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2086     return stats
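# Editorial sketch (not part of this changeset): a hypothetical driver for
# graft() following the docstring above; pctx defaults to ctx.p1() and the
# labels match the documented example. stats[3] is the unresolved-file count
# returned by update(). The helper name is illustrative only.
def _examplegraft(repo, ctx):
    stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])
    if stats[3]:
        raise error.Abort(_("unresolved merge conflicts"))
    return stats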