merge: coerce nodes to bytes, not str...
Augie Fackler
r36195:187f2474 default
@@ -1,2084 +1,2084 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from . import (
25 from . import (
26 copies,
26 copies,
27 error,
27 error,
28 filemerge,
28 filemerge,
29 match as matchmod,
29 match as matchmod,
30 obsutil,
30 obsutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepoutil,
33 subrepoutil,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41 def _droponode(data):
41 def _droponode(data):
42 # used for compatibility for v1
42 # used for compatibility for v1
43 bits = data.split('\0')
43 bits = data.split('\0')
44 bits = bits[:-2] + bits[-1:]
44 bits = bits[:-2] + bits[-1:]
45 return '\0'.join(bits)
45 return '\0'.join(bits)
46
46
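# Illustration (not in upstream merge.py, values are made up): an 'F' record
# in the v2 format stores, NUL-separated, the filename followed by
# [state, local hash, local path, ancestor path, ancestor node,
#  other path, other node, flags].  _droponode() strips the second-to-last
# field (the "other node"), which the v1 format does not carry:
#
#   >>> _droponode('a.txt\x00u\x00deadbeef\x00a.txt\x00a.txt\x00anc0\x00a.txt\x00oth0\x00x')
#   'a.txt\x00u\x00deadbeef\x00a.txt\x00a.txt\x00anc0\x00a.txt\x00x'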
47 class mergestate(object):
47 class mergestate(object):
48 '''track 3-way merge state of individual files
48 '''track 3-way merge state of individual files
49
49
50 The merge state is stored on disk when needed. Two files are used: one with
50 The merge state is stored on disk when needed. Two files are used: one with
51 an old format (version 1), and one with a new format (version 2). Version 2
51 an old format (version 1), and one with a new format (version 2). Version 2
52 stores a superset of the data in version 1, including new kinds of records
52 stores a superset of the data in version 1, including new kinds of records
53 in the future. For more about the new format, see the documentation for
53 in the future. For more about the new format, see the documentation for
54 `_readrecordsv2`.
54 `_readrecordsv2`.
55
55
56 Each record can contain arbitrary content, and has an associated type. This
56 Each record can contain arbitrary content, and has an associated type. This
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 versions of Mercurial that don't support it should abort. If `type` is
58 versions of Mercurial that don't support it should abort. If `type` is
59 lowercase, the record can be safely ignored.
59 lowercase, the record can be safely ignored.
60
60
61 Currently known records:
61 Currently known records:
62
62
63 L: the node of the "local" part of the merge (hexified version)
63 L: the node of the "local" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
65 F: an entry for a file to be merged
65 F: an entry for a file to be merged
66 C: a change/delete or delete/change conflict
66 C: a change/delete or delete/change conflict
67 D: a file that the external merge driver will merge internally
67 D: a file that the external merge driver will merge internally
68 (experimental)
68 (experimental)
69 P: a path conflict (file vs directory)
69 P: a path conflict (file vs directory)
70 m: the external merge driver defined for this merge plus its run state
70 m: the external merge driver defined for this merge plus its run state
71 (experimental)
71 (experimental)
72 f: a (filename, dictionary) tuple of optional values for a given file
72 f: a (filename, dictionary) tuple of optional values for a given file
73 X: unsupported mandatory record type (used in tests)
73 X: unsupported mandatory record type (used in tests)
74 x: unsupported advisory record type (used in tests)
74 x: unsupported advisory record type (used in tests)
75 l: the labels for the parts of the merge.
75 l: the labels for the parts of the merge.
76
76
77 Merge driver run states (experimental):
77 Merge driver run states (experimental):
78 u: driver-resolved files unmarked -- needs to be run next time we're about
78 u: driver-resolved files unmarked -- needs to be run next time we're about
79 to resolve or commit
79 to resolve or commit
80 m: driver-resolved files marked -- only needs to be run before commit
80 m: driver-resolved files marked -- only needs to be run before commit
81 s: success/skipped -- does not need to be run any more
81 s: success/skipped -- does not need to be run any more
82
82
83 Merge record states (stored in self._state, indexed by filename):
83 Merge record states (stored in self._state, indexed by filename):
84 u: unresolved conflict
84 u: unresolved conflict
85 r: resolved conflict
85 r: resolved conflict
86 pu: unresolved path conflict (file conflicts with directory)
86 pu: unresolved path conflict (file conflicts with directory)
87 pr: resolved path conflict
87 pr: resolved path conflict
88 d: driver-resolved conflict
88 d: driver-resolved conflict
89
89
90 The resolve command transitions between 'u' and 'r' for conflicts and
90 The resolve command transitions between 'u' and 'r' for conflicts and
91 'pu' and 'pr' for path conflicts.
91 'pu' and 'pr' for path conflicts.
92 '''
92 '''
93 statepathv1 = 'merge/state'
93 statepathv1 = 'merge/state'
94 statepathv2 = 'merge/state2'
94 statepathv2 = 'merge/state2'
95
95
96 @staticmethod
96 @staticmethod
97 def clean(repo, node=None, other=None, labels=None):
97 def clean(repo, node=None, other=None, labels=None):
98 """Initialize a brand new merge state, removing any existing state on
98 """Initialize a brand new merge state, removing any existing state on
99 disk."""
99 disk."""
100 ms = mergestate(repo)
100 ms = mergestate(repo)
101 ms.reset(node, other, labels)
101 ms.reset(node, other, labels)
102 return ms
102 return ms
103
103
104 @staticmethod
104 @staticmethod
105 def read(repo):
105 def read(repo):
106 """Initialize the merge state, reading it from disk."""
106 """Initialize the merge state, reading it from disk."""
107 ms = mergestate(repo)
107 ms = mergestate(repo)
108 ms._read()
108 ms._read()
109 return ms
109 return ms
110
110
111 def __init__(self, repo):
111 def __init__(self, repo):
112 """Initialize the merge state.
112 """Initialize the merge state.
113
113
114 Do not use this directly! Instead call read() or clean()."""
114 Do not use this directly! Instead call read() or clean()."""
115 self._repo = repo
115 self._repo = repo
116 self._dirty = False
116 self._dirty = False
117 self._labels = None
117 self._labels = None
118
118
119 def reset(self, node=None, other=None, labels=None):
119 def reset(self, node=None, other=None, labels=None):
120 self._state = {}
120 self._state = {}
121 self._stateextras = {}
121 self._stateextras = {}
122 self._local = None
122 self._local = None
123 self._other = None
123 self._other = None
124 self._labels = labels
124 self._labels = labels
125 for var in ('localctx', 'otherctx'):
125 for var in ('localctx', 'otherctx'):
126 if var in vars(self):
126 if var in vars(self):
127 delattr(self, var)
127 delattr(self, var)
128 if node:
128 if node:
129 self._local = node
129 self._local = node
130 self._other = other
130 self._other = other
131 self._readmergedriver = None
131 self._readmergedriver = None
132 if self.mergedriver:
132 if self.mergedriver:
133 self._mdstate = 's'
133 self._mdstate = 's'
134 else:
134 else:
135 self._mdstate = 'u'
135 self._mdstate = 'u'
136 shutil.rmtree(self._repo.vfs.join('merge'), True)
136 shutil.rmtree(self._repo.vfs.join('merge'), True)
137 self._results = {}
137 self._results = {}
138 self._dirty = False
138 self._dirty = False
139
139
140 def _read(self):
140 def _read(self):
141 """Analyse each record content to restore a serialized state from disk
141 """Analyse each record content to restore a serialized state from disk
142
142
143 This function processes "record" entries produced by the de-serialization
143 This function processes "record" entries produced by the de-serialization
144 of the on-disk file.
144 of the on-disk file.
145 """
145 """
146 self._state = {}
146 self._state = {}
147 self._stateextras = {}
147 self._stateextras = {}
148 self._local = None
148 self._local = None
149 self._other = None
149 self._other = None
150 for var in ('localctx', 'otherctx'):
150 for var in ('localctx', 'otherctx'):
151 if var in vars(self):
151 if var in vars(self):
152 delattr(self, var)
152 delattr(self, var)
153 self._readmergedriver = None
153 self._readmergedriver = None
154 self._mdstate = 's'
154 self._mdstate = 's'
155 unsupported = set()
155 unsupported = set()
156 records = self._readrecords()
156 records = self._readrecords()
157 for rtype, record in records:
157 for rtype, record in records:
158 if rtype == 'L':
158 if rtype == 'L':
159 self._local = bin(record)
159 self._local = bin(record)
160 elif rtype == 'O':
160 elif rtype == 'O':
161 self._other = bin(record)
161 self._other = bin(record)
162 elif rtype == 'm':
162 elif rtype == 'm':
163 bits = record.split('\0', 1)
163 bits = record.split('\0', 1)
164 mdstate = bits[1]
164 mdstate = bits[1]
165 if len(mdstate) != 1 or mdstate not in 'ums':
165 if len(mdstate) != 1 or mdstate not in 'ums':
166 # the merge driver should be idempotent, so just rerun it
166 # the merge driver should be idempotent, so just rerun it
167 mdstate = 'u'
167 mdstate = 'u'
168
168
169 self._readmergedriver = bits[0]
169 self._readmergedriver = bits[0]
170 self._mdstate = mdstate
170 self._mdstate = mdstate
171 elif rtype in 'FDCP':
171 elif rtype in 'FDCP':
172 bits = record.split('\0')
172 bits = record.split('\0')
173 self._state[bits[0]] = bits[1:]
173 self._state[bits[0]] = bits[1:]
174 elif rtype == 'f':
174 elif rtype == 'f':
175 filename, rawextras = record.split('\0', 1)
175 filename, rawextras = record.split('\0', 1)
176 extraparts = rawextras.split('\0')
176 extraparts = rawextras.split('\0')
177 extras = {}
177 extras = {}
178 i = 0
178 i = 0
179 while i < len(extraparts):
179 while i < len(extraparts):
180 extras[extraparts[i]] = extraparts[i + 1]
180 extras[extraparts[i]] = extraparts[i + 1]
181 i += 2
181 i += 2
182
182
183 self._stateextras[filename] = extras
183 self._stateextras[filename] = extras
184 elif rtype == 'l':
184 elif rtype == 'l':
185 labels = record.split('\0', 2)
185 labels = record.split('\0', 2)
186 self._labels = [l for l in labels if len(l) > 0]
186 self._labels = [l for l in labels if len(l) > 0]
187 elif not rtype.islower():
187 elif not rtype.islower():
188 unsupported.add(rtype)
188 unsupported.add(rtype)
189 self._results = {}
189 self._results = {}
190 self._dirty = False
190 self._dirty = False
191
191
192 if unsupported:
192 if unsupported:
193 raise error.UnsupportedMergeRecords(unsupported)
193 raise error.UnsupportedMergeRecords(unsupported)
194
194
195 def _readrecords(self):
195 def _readrecords(self):
196 """Read merge state from disk and return a list of record (TYPE, data)
196 """Read merge state from disk and return a list of record (TYPE, data)
197
197
198 We read data from both v1 and v2 files and decide which one to use.
198 We read data from both v1 and v2 files and decide which one to use.
199
199
200 V1 has been used by versions prior to 2.9.1 and contains less data than
200 V1 has been used by versions prior to 2.9.1 and contains less data than
201 v2. We read both versions and check if no data in v2 contradicts
201 v2. We read both versions and check if no data in v2 contradicts
202 v1. If there is no contradiction we can safely assume that both v1
202 v1. If there is no contradiction we can safely assume that both v1
203 and v2 were written at the same time and use the extra data in v2. If
203 and v2 were written at the same time and use the extra data in v2. If
204 there is a contradiction we ignore the v2 content, assuming an old version
204 there is a contradiction we ignore the v2 content, assuming an old version
205 of Mercurial has overwritten the mergestate file and left an old v2
205 of Mercurial has overwritten the mergestate file and left an old v2
206 file around.
206 file around.
207
207
208 returns list of records [(TYPE, data), ...]"""
208 returns list of records [(TYPE, data), ...]"""
209 v1records = self._readrecordsv1()
209 v1records = self._readrecordsv1()
210 v2records = self._readrecordsv2()
210 v2records = self._readrecordsv2()
211 if self._v1v2match(v1records, v2records):
211 if self._v1v2match(v1records, v2records):
212 return v2records
212 return v2records
213 else:
213 else:
214 # v1 file is newer than v2 file, use it
214 # v1 file is newer than v2 file, use it
215 # we have to infer the "other" changeset of the merge
215 # we have to infer the "other" changeset of the merge
216 # we cannot do better than that with v1 of the format
216 # we cannot do better than that with v1 of the format
217 mctx = self._repo[None].parents()[-1]
217 mctx = self._repo[None].parents()[-1]
218 v1records.append(('O', mctx.hex()))
218 v1records.append(('O', mctx.hex()))
219 # add placeholder "other" file node information
219 # add placeholder "other" file node information
220 # nobody is using it yet so we do not need to fetch the data
220 # nobody is using it yet so we do not need to fetch the data
221 # if mctx was wrong `mctx[bits[-2]]` may fail.
221 # if mctx was wrong `mctx[bits[-2]]` may fail.
222 for idx, r in enumerate(v1records):
222 for idx, r in enumerate(v1records):
223 if r[0] == 'F':
223 if r[0] == 'F':
224 bits = r[1].split('\0')
224 bits = r[1].split('\0')
225 bits.insert(-2, '')
225 bits.insert(-2, '')
226 v1records[idx] = (r[0], '\0'.join(bits))
226 v1records[idx] = (r[0], '\0'.join(bits))
227 return v1records
227 return v1records
228
228
229 def _v1v2match(self, v1records, v2records):
229 def _v1v2match(self, v1records, v2records):
230 oldv2 = set() # old format version of v2 record
230 oldv2 = set() # old format version of v2 record
231 for rec in v2records:
231 for rec in v2records:
232 if rec[0] == 'L':
232 if rec[0] == 'L':
233 oldv2.add(rec)
233 oldv2.add(rec)
234 elif rec[0] == 'F':
234 elif rec[0] == 'F':
235 # drop the onode data (not contained in v1)
235 # drop the onode data (not contained in v1)
236 oldv2.add(('F', _droponode(rec[1])))
236 oldv2.add(('F', _droponode(rec[1])))
237 for rec in v1records:
237 for rec in v1records:
238 if rec not in oldv2:
238 if rec not in oldv2:
239 return False
239 return False
240 else:
240 else:
241 return True
241 return True
242
242
243 def _readrecordsv1(self):
243 def _readrecordsv1(self):
244 """read on disk merge state for version 1 file
244 """read on disk merge state for version 1 file
245
245
246 returns list of records [(TYPE, data), ...]
246 returns list of records [(TYPE, data), ...]
247
247
248 Note: the "F" data from this file are one entry short
248 Note: the "F" data from this file are one entry short
249 (no "other file node" entry)
249 (no "other file node" entry)
250 """
250 """
251 records = []
251 records = []
252 try:
252 try:
253 f = self._repo.vfs(self.statepathv1)
253 f = self._repo.vfs(self.statepathv1)
254 for i, l in enumerate(f):
254 for i, l in enumerate(f):
255 if i == 0:
255 if i == 0:
256 records.append(('L', l[:-1]))
256 records.append(('L', l[:-1]))
257 else:
257 else:
258 records.append(('F', l[:-1]))
258 records.append(('F', l[:-1]))
259 f.close()
259 f.close()
260 except IOError as err:
260 except IOError as err:
261 if err.errno != errno.ENOENT:
261 if err.errno != errno.ENOENT:
262 raise
262 raise
263 return records
263 return records
264
264
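# Illustration (not in upstream merge.py, values are made up): the v1 file
# '.hg/merge/state' is line oriented.  The first line is the hex node of the
# local changeset and every following line is one 'F' record whose
# NUL-separated fields omit the "other node" entry, e.g.
#
#   0123456789abcdef0123456789abcdef01234567
#   a.txt\x00u\x00deadbeef\x00a.txt\x00a.txt\x00anc0\x00a.txt\x00x
#
# which _readrecordsv1() returns as
#   [('L', '0123...4567'), ('F', 'a.txt\x00u\x00...\x00x')]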
265 def _readrecordsv2(self):
265 def _readrecordsv2(self):
266 """read on disk merge state for version 2 file
266 """read on disk merge state for version 2 file
267
267
268 This format is a list of arbitrary records of the form:
268 This format is a list of arbitrary records of the form:
269
269
270 [type][length][content]
270 [type][length][content]
271
271
272 `type` is a single character, `length` is a 4 byte integer, and
272 `type` is a single character, `length` is a 4 byte integer, and
273 `content` is an arbitrary byte sequence of length `length`.
273 `content` is an arbitrary byte sequence of length `length`.
274
274
275 Mercurial versions prior to 3.7 have a bug where if there are
275 Mercurial versions prior to 3.7 have a bug where if there are
276 unsupported mandatory merge records, attempting to clear out the merge
276 unsupported mandatory merge records, attempting to clear out the merge
277 state with hg update --clean or similar aborts. The 't' record type
277 state with hg update --clean or similar aborts. The 't' record type
278 works around that by writing out what those versions treat as an
278 works around that by writing out what those versions treat as an
279 advisory record, but later versions interpret as special: the first
279 advisory record, but later versions interpret as special: the first
280 character is the 'real' record type and everything onwards is the data.
280 character is the 'real' record type and everything onwards is the data.
281
281
282 Returns list of records [(TYPE, data), ...]."""
282 Returns list of records [(TYPE, data), ...]."""
283 records = []
283 records = []
284 try:
284 try:
285 f = self._repo.vfs(self.statepathv2)
285 f = self._repo.vfs(self.statepathv2)
286 data = f.read()
286 data = f.read()
287 off = 0
287 off = 0
288 end = len(data)
288 end = len(data)
289 while off < end:
289 while off < end:
290 rtype = data[off]
290 rtype = data[off]
291 off += 1
291 off += 1
292 length = _unpack('>I', data[off:(off + 4)])[0]
292 length = _unpack('>I', data[off:(off + 4)])[0]
293 off += 4
293 off += 4
294 record = data[off:(off + length)]
294 record = data[off:(off + length)]
295 off += length
295 off += length
296 if rtype == 't':
296 if rtype == 't':
297 rtype, record = record[0], record[1:]
297 rtype, record = record[0], record[1:]
298 records.append((rtype, record))
298 records.append((rtype, record))
299 f.close()
299 f.close()
300 except IOError as err:
300 except IOError as err:
301 if err.errno != errno.ENOENT:
301 if err.errno != errno.ENOENT:
302 raise
302 raise
303 return records
303 return records
304
304
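# Illustration (not in upstream merge.py): each v2 record is framed as a
# one-byte type, a big-endian 4-byte length, then the payload.  For a
# hypothetical record of type 'X' carrying 'abc':
#
#   >>> _pack('>sI3s', 'X', 3, 'abc')
#   'X\x00\x00\x00\x03abc'
#
# The loop above recovers ('X', 'abc') from that string.  Record types
# outside the v2 whitelist are additionally wrapped in a 't' container whose
# payload starts with the real type (see _writerecordsv2 below).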
305 @util.propertycache
305 @util.propertycache
306 def mergedriver(self):
306 def mergedriver(self):
307 # protect against the following:
307 # protect against the following:
308 # - A configures a malicious merge driver in their hgrc, then
308 # - A configures a malicious merge driver in their hgrc, then
309 # pauses the merge
309 # pauses the merge
310 # - A edits their hgrc to remove references to the merge driver
310 # - A edits their hgrc to remove references to the merge driver
311 # - A gives a copy of their entire repo, including .hg, to B
311 # - A gives a copy of their entire repo, including .hg, to B
312 # - B inspects .hgrc and finds it to be clean
312 # - B inspects .hgrc and finds it to be clean
313 # - B then continues the merge and the malicious merge driver
313 # - B then continues the merge and the malicious merge driver
314 # gets invoked
314 # gets invoked
315 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
315 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
316 if (self._readmergedriver is not None
316 if (self._readmergedriver is not None
317 and self._readmergedriver != configmergedriver):
317 and self._readmergedriver != configmergedriver):
318 raise error.ConfigError(
318 raise error.ConfigError(
319 _("merge driver changed since merge started"),
319 _("merge driver changed since merge started"),
320 hint=_("revert merge driver change or abort merge"))
320 hint=_("revert merge driver change or abort merge"))
321
321
322 return configmergedriver
322 return configmergedriver
323
323
324 @util.propertycache
324 @util.propertycache
325 def localctx(self):
325 def localctx(self):
326 if self._local is None:
326 if self._local is None:
327 msg = "localctx accessed but self._local isn't set"
327 msg = "localctx accessed but self._local isn't set"
328 raise error.ProgrammingError(msg)
328 raise error.ProgrammingError(msg)
329 return self._repo[self._local]
329 return self._repo[self._local]
330
330
331 @util.propertycache
331 @util.propertycache
332 def otherctx(self):
332 def otherctx(self):
333 if self._other is None:
333 if self._other is None:
334 msg = "otherctx accessed but self._other isn't set"
334 msg = "otherctx accessed but self._other isn't set"
335 raise error.ProgrammingError(msg)
335 raise error.ProgrammingError(msg)
336 return self._repo[self._other]
336 return self._repo[self._other]
337
337
338 def active(self):
338 def active(self):
339 """Whether mergestate is active.
339 """Whether mergestate is active.
340
340
341 Returns True if there appears to be mergestate. This is a rough proxy
341 Returns True if there appears to be mergestate. This is a rough proxy
342 for "is a merge in progress."
342 for "is a merge in progress."
343 """
343 """
344 # Check local variables before looking at filesystem for performance
344 # Check local variables before looking at filesystem for performance
345 # reasons.
345 # reasons.
346 return bool(self._local) or bool(self._state) or \
346 return bool(self._local) or bool(self._state) or \
347 self._repo.vfs.exists(self.statepathv1) or \
347 self._repo.vfs.exists(self.statepathv1) or \
348 self._repo.vfs.exists(self.statepathv2)
348 self._repo.vfs.exists(self.statepathv2)
349
349
350 def commit(self):
350 def commit(self):
351 """Write current state on disk (if necessary)"""
351 """Write current state on disk (if necessary)"""
352 if self._dirty:
352 if self._dirty:
353 records = self._makerecords()
353 records = self._makerecords()
354 self._writerecords(records)
354 self._writerecords(records)
355 self._dirty = False
355 self._dirty = False
356
356
357 def _makerecords(self):
357 def _makerecords(self):
358 records = []
358 records = []
359 records.append(('L', hex(self._local)))
359 records.append(('L', hex(self._local)))
360 records.append(('O', hex(self._other)))
360 records.append(('O', hex(self._other)))
361 if self.mergedriver:
361 if self.mergedriver:
362 records.append(('m', '\0'.join([
362 records.append(('m', '\0'.join([
363 self.mergedriver, self._mdstate])))
363 self.mergedriver, self._mdstate])))
364 # Write out state items. In all cases, the value of the state map entry
364 # Write out state items. In all cases, the value of the state map entry
365 # is written as the contents of the record. The record type depends on
365 # is written as the contents of the record. The record type depends on
366 # the type of state that is stored, and capital-letter records are used
366 # the type of state that is stored, and capital-letter records are used
367 # to prevent older versions of Mercurial that do not support the feature
367 # to prevent older versions of Mercurial that do not support the feature
368 # from loading them.
368 # from loading them.
369 for filename, v in self._state.iteritems():
369 for filename, v in self._state.iteritems():
370 if v[0] == 'd':
370 if v[0] == 'd':
371 # Driver-resolved merge. These are stored in 'D' records.
371 # Driver-resolved merge. These are stored in 'D' records.
372 records.append(('D', '\0'.join([filename] + v)))
372 records.append(('D', '\0'.join([filename] + v)))
373 elif v[0] in ('pu', 'pr'):
373 elif v[0] in ('pu', 'pr'):
374 # Path conflicts. These are stored in 'P' records. The current
374 # Path conflicts. These are stored in 'P' records. The current
375 # resolution state ('pu' or 'pr') is stored within the record.
375 # resolution state ('pu' or 'pr') is stored within the record.
376 records.append(('P', '\0'.join([filename] + v)))
376 records.append(('P', '\0'.join([filename] + v)))
377 elif v[1] == nullhex or v[6] == nullhex:
377 elif v[1] == nullhex or v[6] == nullhex:
378 # Change/Delete or Delete/Change conflicts. These are stored in
378 # Change/Delete or Delete/Change conflicts. These are stored in
379 # 'C' records. v[1] is the local file, and is nullhex when the
379 # 'C' records. v[1] is the local file, and is nullhex when the
380 # file is deleted locally ('dc'). v[6] is the remote file, and
380 # file is deleted locally ('dc'). v[6] is the remote file, and
381 # is nullhex when the file is deleted remotely ('cd').
381 # is nullhex when the file is deleted remotely ('cd').
382 records.append(('C', '\0'.join([filename] + v)))
382 records.append(('C', '\0'.join([filename] + v)))
383 else:
383 else:
384 # Normal files. These are stored in 'F' records.
384 # Normal files. These are stored in 'F' records.
385 records.append(('F', '\0'.join([filename] + v)))
385 records.append(('F', '\0'.join([filename] + v)))
386 for filename, extras in sorted(self._stateextras.iteritems()):
386 for filename, extras in sorted(self._stateextras.iteritems()):
387 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
387 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
388 extras.iteritems())
388 extras.iteritems())
389 records.append(('f', '%s\0%s' % (filename, rawextras)))
389 records.append(('f', '%s\0%s' % (filename, rawextras)))
390 if self._labels is not None:
390 if self._labels is not None:
391 labels = '\0'.join(self._labels)
391 labels = '\0'.join(self._labels)
392 records.append(('l', labels))
392 records.append(('l', labels))
393 return records
393 return records
394
394
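# Illustration (a sketch with made-up hashes, not upstream output): for a
# merge with one unresolved file 'a.txt' and labels set, _makerecords()
# returns a list shaped like
#
#   [('L', '<40-hex local node>'),
#    ('O', '<40-hex other node>'),
#    ('F', 'a.txt\x00u\x00<hash>\x00a.txt\x00a.txt\x00<anode>\x00a.txt\x00<onode>\x00'),
#    ('f', 'a.txt\x00ancestorlinknode\x00<40-hex ancestor changeset>'),
#    ('l', 'working copy\x00merge rev')]
#
# Uppercase types are mandatory for older clients; 'f' and 'l' are advisory.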
395 def _writerecords(self, records):
395 def _writerecords(self, records):
396 """Write current state on disk (both v1 and v2)"""
396 """Write current state on disk (both v1 and v2)"""
397 self._writerecordsv1(records)
397 self._writerecordsv1(records)
398 self._writerecordsv2(records)
398 self._writerecordsv2(records)
399
399
400 def _writerecordsv1(self, records):
400 def _writerecordsv1(self, records):
401 """Write current state on disk in a version 1 file"""
401 """Write current state on disk in a version 1 file"""
402 f = self._repo.vfs(self.statepathv1, 'w')
402 f = self._repo.vfs(self.statepathv1, 'w')
403 irecords = iter(records)
403 irecords = iter(records)
404 lrecords = next(irecords)
404 lrecords = next(irecords)
405 assert lrecords[0] == 'L'
405 assert lrecords[0] == 'L'
406 f.write(hex(self._local) + '\n')
406 f.write(hex(self._local) + '\n')
407 for rtype, data in irecords:
407 for rtype, data in irecords:
408 if rtype == 'F':
408 if rtype == 'F':
409 f.write('%s\n' % _droponode(data))
409 f.write('%s\n' % _droponode(data))
410 f.close()
410 f.close()
411
411
412 def _writerecordsv2(self, records):
412 def _writerecordsv2(self, records):
413 """Write current state on disk in a version 2 file
413 """Write current state on disk in a version 2 file
414
414
415 See the docstring for _readrecordsv2 for why we use 't'."""
415 See the docstring for _readrecordsv2 for why we use 't'."""
416 # these are the records that all version 2 clients can read
416 # these are the records that all version 2 clients can read
417 whitelist = 'LOF'
417 whitelist = 'LOF'
418 f = self._repo.vfs(self.statepathv2, 'w')
418 f = self._repo.vfs(self.statepathv2, 'w')
419 for key, data in records:
419 for key, data in records:
420 assert len(key) == 1
420 assert len(key) == 1
421 if key not in whitelist:
421 if key not in whitelist:
422 key, data = 't', '%s%s' % (key, data)
422 key, data = 't', '%s%s' % (key, data)
423 format = '>sI%is' % len(data)
423 format = '>sI%is' % len(data)
424 f.write(_pack(format, key, len(data), data))
424 f.write(_pack(format, key, len(data), data))
425 f.close()
425 f.close()
426
426
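# Illustration (not in upstream merge.py): only 'L', 'O' and 'F' records are
# written as-is by the v2 writer above; anything else is wrapped so pre-3.7
# clients skip it rather than abort.  For an advisory labels record the
# writer effectively does
#
#   key, data = 't', 'l' + 'working copy\x00merge rev'
#   f.write(_pack('>sI%is' % len(data), key, len(data), data))
#
# producing the byte 't', the big-endian length 23, then the payload
# 'lworking copy\x00merge rev'.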
427 def add(self, fcl, fco, fca, fd):
427 def add(self, fcl, fco, fca, fd):
428 """add a new (potentially?) conflicting file to the merge state
428 """add a new (potentially?) conflicting file to the merge state
429 fcl: file context for local,
429 fcl: file context for local,
430 fco: file context for remote,
430 fco: file context for remote,
431 fca: file context for ancestors,
431 fca: file context for ancestors,
432 fd: file path of the resulting merge.
432 fd: file path of the resulting merge.
433
433
434 note: also write the local version to the `.hg/merge` directory.
434 note: also write the local version to the `.hg/merge` directory.
435 """
435 """
436 if fcl.isabsent():
436 if fcl.isabsent():
437 hash = nullhex
437 hash = nullhex
438 else:
438 else:
439 hash = hex(hashlib.sha1(fcl.path()).digest())
439 hash = hex(hashlib.sha1(fcl.path()).digest())
440 self._repo.vfs.write('merge/' + hash, fcl.data())
440 self._repo.vfs.write('merge/' + hash, fcl.data())
441 self._state[fd] = ['u', hash, fcl.path(),
441 self._state[fd] = ['u', hash, fcl.path(),
442 fca.path(), hex(fca.filenode()),
442 fca.path(), hex(fca.filenode()),
443 fco.path(), hex(fco.filenode()),
443 fco.path(), hex(fco.filenode()),
444 fcl.flags()]
444 fcl.flags()]
445 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
445 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
446 self._dirty = True
446 self._dirty = True
447
447
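# Illustration (made-up values, not upstream output): after
# ms.add(fcl, fco, fca, 'a.txt') the in-memory entry looks like
#
#   ms._state['a.txt'] == ['u', '<sha1 of the path "a.txt">', 'a.txt',
#                          'a.txt', '<ancestor filenode hex>',
#                          'a.txt', '<other filenode hex>', '']
#
# and a pristine copy of the local file contents is stashed under
# '.hg/merge/<that sha1>' so the merge can be re-run later by `hg resolve`.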
448 def addpath(self, path, frename, forigin):
448 def addpath(self, path, frename, forigin):
449 """add a new conflicting path to the merge state
449 """add a new conflicting path to the merge state
450 path: the path that conflicts
450 path: the path that conflicts
451 frename: the filename the conflicting file was renamed to
451 frename: the filename the conflicting file was renamed to
452 forigin: origin of the file ('l' or 'r' for local/remote)
452 forigin: origin of the file ('l' or 'r' for local/remote)
453 """
453 """
454 self._state[path] = ['pu', frename, forigin]
454 self._state[path] = ['pu', frename, forigin]
455 self._dirty = True
455 self._dirty = True
456
456
457 def __contains__(self, dfile):
457 def __contains__(self, dfile):
458 return dfile in self._state
458 return dfile in self._state
459
459
460 def __getitem__(self, dfile):
460 def __getitem__(self, dfile):
461 return self._state[dfile][0]
461 return self._state[dfile][0]
462
462
463 def __iter__(self):
463 def __iter__(self):
464 return iter(sorted(self._state))
464 return iter(sorted(self._state))
465
465
466 def files(self):
466 def files(self):
467 return self._state.keys()
467 return self._state.keys()
468
468
469 def mark(self, dfile, state):
469 def mark(self, dfile, state):
470 self._state[dfile][0] = state
470 self._state[dfile][0] = state
471 self._dirty = True
471 self._dirty = True
472
472
473 def mdstate(self):
473 def mdstate(self):
474 return self._mdstate
474 return self._mdstate
475
475
476 def unresolved(self):
476 def unresolved(self):
477 """Obtain the paths of unresolved files."""
477 """Obtain the paths of unresolved files."""
478
478
479 for f, entry in self._state.iteritems():
479 for f, entry in self._state.iteritems():
480 if entry[0] in ('u', 'pu'):
480 if entry[0] in ('u', 'pu'):
481 yield f
481 yield f
482
482
483 def driverresolved(self):
483 def driverresolved(self):
484 """Obtain the paths of driver-resolved files."""
484 """Obtain the paths of driver-resolved files."""
485
485
486 for f, entry in self._state.items():
486 for f, entry in self._state.items():
487 if entry[0] == 'd':
487 if entry[0] == 'd':
488 yield f
488 yield f
489
489
490 def extras(self, filename):
490 def extras(self, filename):
491 return self._stateextras.setdefault(filename, {})
491 return self._stateextras.setdefault(filename, {})
492
492
493 def _resolve(self, preresolve, dfile, wctx):
493 def _resolve(self, preresolve, dfile, wctx):
494 """rerun merge process for file path `dfile`"""
494 """rerun merge process for file path `dfile`"""
495 if self[dfile] in 'rd':
495 if self[dfile] in 'rd':
496 return True, 0
496 return True, 0
497 stateentry = self._state[dfile]
497 stateentry = self._state[dfile]
498 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
498 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
499 octx = self._repo[self._other]
499 octx = self._repo[self._other]
500 extras = self.extras(dfile)
500 extras = self.extras(dfile)
501 anccommitnode = extras.get('ancestorlinknode')
501 anccommitnode = extras.get('ancestorlinknode')
502 if anccommitnode:
502 if anccommitnode:
503 actx = self._repo[anccommitnode]
503 actx = self._repo[anccommitnode]
504 else:
504 else:
505 actx = None
505 actx = None
506 fcd = self._filectxorabsent(hash, wctx, dfile)
506 fcd = self._filectxorabsent(hash, wctx, dfile)
507 fco = self._filectxorabsent(onode, octx, ofile)
507 fco = self._filectxorabsent(onode, octx, ofile)
508 # TODO: move this to filectxorabsent
508 # TODO: move this to filectxorabsent
509 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
509 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
510 # "premerge" x flags
510 # "premerge" x flags
511 flo = fco.flags()
511 flo = fco.flags()
512 fla = fca.flags()
512 fla = fca.flags()
513 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
513 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
514 if fca.node() == nullid and flags != flo:
514 if fca.node() == nullid and flags != flo:
515 if preresolve:
515 if preresolve:
516 self._repo.ui.warn(
516 self._repo.ui.warn(
517 _('warning: cannot merge flags for %s '
517 _('warning: cannot merge flags for %s '
518 'without common ancestor - keeping local flags\n')
518 'without common ancestor - keeping local flags\n')
519 % afile)
519 % afile)
520 elif flags == fla:
520 elif flags == fla:
521 flags = flo
521 flags = flo
522 if preresolve:
522 if preresolve:
523 # restore local
523 # restore local
524 if hash != nullhex:
524 if hash != nullhex:
525 f = self._repo.vfs('merge/' + hash)
525 f = self._repo.vfs('merge/' + hash)
526 wctx[dfile].write(f.read(), flags)
526 wctx[dfile].write(f.read(), flags)
527 f.close()
527 f.close()
528 else:
528 else:
529 wctx[dfile].remove(ignoremissing=True)
529 wctx[dfile].remove(ignoremissing=True)
530 complete, r, deleted = filemerge.premerge(self._repo, wctx,
530 complete, r, deleted = filemerge.premerge(self._repo, wctx,
531 self._local, lfile, fcd,
531 self._local, lfile, fcd,
532 fco, fca,
532 fco, fca,
533 labels=self._labels)
533 labels=self._labels)
534 else:
534 else:
535 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
535 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
536 self._local, lfile, fcd,
536 self._local, lfile, fcd,
537 fco, fca,
537 fco, fca,
538 labels=self._labels)
538 labels=self._labels)
539 if r is None:
539 if r is None:
540 # no real conflict
540 # no real conflict
541 del self._state[dfile]
541 del self._state[dfile]
542 self._stateextras.pop(dfile, None)
542 self._stateextras.pop(dfile, None)
543 self._dirty = True
543 self._dirty = True
544 elif not r:
544 elif not r:
545 self.mark(dfile, 'r')
545 self.mark(dfile, 'r')
546
546
547 if complete:
547 if complete:
548 action = None
548 action = None
549 if deleted:
549 if deleted:
550 if fcd.isabsent():
550 if fcd.isabsent():
551 # dc: local picked. Need to drop if present, which may
551 # dc: local picked. Need to drop if present, which may
552 # happen on re-resolves.
552 # happen on re-resolves.
553 action = 'f'
553 action = 'f'
554 else:
554 else:
555 # cd: remote picked (or otherwise deleted)
555 # cd: remote picked (or otherwise deleted)
556 action = 'r'
556 action = 'r'
557 else:
557 else:
558 if fcd.isabsent(): # dc: remote picked
558 if fcd.isabsent(): # dc: remote picked
559 action = 'g'
559 action = 'g'
560 elif fco.isabsent(): # cd: local picked
560 elif fco.isabsent(): # cd: local picked
561 if dfile in self.localctx:
561 if dfile in self.localctx:
562 action = 'am'
562 action = 'am'
563 else:
563 else:
564 action = 'a'
564 action = 'a'
565 # else: regular merges (no action necessary)
565 # else: regular merges (no action necessary)
566 self._results[dfile] = r, action
566 self._results[dfile] = r, action
567
567
568 return complete, r
568 return complete, r
569
569
570 def _filectxorabsent(self, hexnode, ctx, f):
570 def _filectxorabsent(self, hexnode, ctx, f):
571 if hexnode == nullhex:
571 if hexnode == nullhex:
572 return filemerge.absentfilectx(ctx, f)
572 return filemerge.absentfilectx(ctx, f)
573 else:
573 else:
574 return ctx[f]
574 return ctx[f]
575
575
576 def preresolve(self, dfile, wctx):
576 def preresolve(self, dfile, wctx):
577 """run premerge process for dfile
577 """run premerge process for dfile
578
578
579 Returns whether the merge is complete, and the exit code."""
579 Returns whether the merge is complete, and the exit code."""
580 return self._resolve(True, dfile, wctx)
580 return self._resolve(True, dfile, wctx)
581
581
582 def resolve(self, dfile, wctx):
582 def resolve(self, dfile, wctx):
583 """run merge process (assuming premerge was run) for dfile
583 """run merge process (assuming premerge was run) for dfile
584
584
585 Returns the exit code of the merge."""
585 Returns the exit code of the merge."""
586 return self._resolve(False, dfile, wctx)[1]
586 return self._resolve(False, dfile, wctx)[1]
587
587
588 def counts(self):
588 def counts(self):
589 """return counts for updated, merged and removed files in this
589 """return counts for updated, merged and removed files in this
590 session"""
590 session"""
591 updated, merged, removed = 0, 0, 0
591 updated, merged, removed = 0, 0, 0
592 for r, action in self._results.itervalues():
592 for r, action in self._results.itervalues():
593 if r is None:
593 if r is None:
594 updated += 1
594 updated += 1
595 elif r == 0:
595 elif r == 0:
596 if action == 'r':
596 if action == 'r':
597 removed += 1
597 removed += 1
598 else:
598 else:
599 merged += 1
599 merged += 1
600 return updated, merged, removed
600 return updated, merged, removed
601
601
602 def unresolvedcount(self):
602 def unresolvedcount(self):
603 """get unresolved count for this merge (persistent)"""
603 """get unresolved count for this merge (persistent)"""
604 return len(list(self.unresolved()))
604 return len(list(self.unresolved()))
605
605
606 def actions(self):
606 def actions(self):
607 """return lists of actions to perform on the dirstate"""
607 """return lists of actions to perform on the dirstate"""
608 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
608 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
609 for f, (r, action) in self._results.iteritems():
609 for f, (r, action) in self._results.iteritems():
610 if action is not None:
610 if action is not None:
611 actions[action].append((f, None, "merge result"))
611 actions[action].append((f, None, "merge result"))
612 return actions
612 return actions
613
613
614 def recordactions(self):
614 def recordactions(self):
615 """record remove/add/get actions in the dirstate"""
615 """record remove/add/get actions in the dirstate"""
616 branchmerge = self._repo.dirstate.p2() != nullid
616 branchmerge = self._repo.dirstate.p2() != nullid
617 recordupdates(self._repo, self.actions(), branchmerge)
617 recordupdates(self._repo, self.actions(), branchmerge)
618
618
619 def queueremove(self, f):
619 def queueremove(self, f):
620 """queues a file to be removed from the dirstate
620 """queues a file to be removed from the dirstate
621
621
622 Meant for use by custom merge drivers."""
622 Meant for use by custom merge drivers."""
623 self._results[f] = 0, 'r'
623 self._results[f] = 0, 'r'
624
624
625 def queueadd(self, f):
625 def queueadd(self, f):
626 """queues a file to be added to the dirstate
626 """queues a file to be added to the dirstate
627
627
628 Meant for use by custom merge drivers."""
628 Meant for use by custom merge drivers."""
629 self._results[f] = 0, 'a'
629 self._results[f] = 0, 'a'
630
630
631 def queueget(self, f):
631 def queueget(self, f):
632 """queues a file to be marked modified in the dirstate
632 """queues a file to be marked modified in the dirstate
633
633
634 Meant for use by custom merge drivers."""
634 Meant for use by custom merge drivers."""
635 self._results[f] = 0, 'g'
635 self._results[f] = 0, 'g'
636
636
637 def _getcheckunknownconfig(repo, section, name):
637 def _getcheckunknownconfig(repo, section, name):
638 config = repo.ui.config(section, name)
638 config = repo.ui.config(section, name)
639 valid = ['abort', 'ignore', 'warn']
639 valid = ['abort', 'ignore', 'warn']
640 if config not in valid:
640 if config not in valid:
641 validstr = ', '.join(["'" + v + "'" for v in valid])
641 validstr = ', '.join(["'" + v + "'" for v in valid])
642 raise error.ConfigError(_("%s.%s not valid "
642 raise error.ConfigError(_("%s.%s not valid "
643 "('%s' is none of %s)")
643 "('%s' is none of %s)")
644 % (section, name, config, validstr))
644 % (section, name, config, validstr))
645 return config
645 return config
646
646
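# Note (illustrative hgrc snippet, not part of this module): the two
# settings read through the helper above are ordinary config options and
# each accepts 'abort', 'ignore' or 'warn', for example
#
#   [merge]
#   checkunknown = warn
#   checkignored = ignore
#
# Any other value triggers the ConfigError raised above.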
647 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
647 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
648 if wctx.isinmemory():
648 if wctx.isinmemory():
649 # Nothing to do in IMM because nothing in the "working copy" can be an
649 # Nothing to do in IMM because nothing in the "working copy" can be an
650 # unknown file.
650 # unknown file.
651 #
651 #
652 # Note that we should bail out here, not in ``_checkunknownfiles()``,
652 # Note that we should bail out here, not in ``_checkunknownfiles()``,
653 # because that function does other useful work.
653 # because that function does other useful work.
654 return False
654 return False
655
655
656 if f2 is None:
656 if f2 is None:
657 f2 = f
657 f2 = f
658 return (repo.wvfs.audit.check(f)
658 return (repo.wvfs.audit.check(f)
659 and repo.wvfs.isfileorlink(f)
659 and repo.wvfs.isfileorlink(f)
660 and repo.dirstate.normalize(f) not in repo.dirstate
660 and repo.dirstate.normalize(f) not in repo.dirstate
661 and mctx[f2].cmp(wctx[f]))
661 and mctx[f2].cmp(wctx[f]))
662
662
663 class _unknowndirschecker(object):
663 class _unknowndirschecker(object):
664 """
664 """
665 Look for any unknown files or directories that may have a path conflict
665 Look for any unknown files or directories that may have a path conflict
666 with a file. If any path prefix of the file exists as a file or link,
666 with a file. If any path prefix of the file exists as a file or link,
667 then it conflicts. If the file itself is a directory that contains any
667 then it conflicts. If the file itself is a directory that contains any
668 file that is not tracked, then it conflicts.
668 file that is not tracked, then it conflicts.
669
669
670 Returns the shortest path at which a conflict occurs, or None if there is
670 Returns the shortest path at which a conflict occurs, or None if there is
671 no conflict.
671 no conflict.
672 """
672 """
673 def __init__(self):
673 def __init__(self):
674 # A set of paths known to be good. This prevents repeated checking of
674 # A set of paths known to be good. This prevents repeated checking of
675 # dirs. It will be updated with any new dirs that are checked and found
675 # dirs. It will be updated with any new dirs that are checked and found
676 # to be safe.
676 # to be safe.
677 self._unknowndircache = set()
677 self._unknowndircache = set()
678
678
679 # A set of paths that are known to be absent. This prevents repeated
679 # A set of paths that are known to be absent. This prevents repeated
680 # checking of subdirectories that are known not to exist. It will be
680 # checking of subdirectories that are known not to exist. It will be
681 # updated with any new dirs that are checked and found to be absent.
681 # updated with any new dirs that are checked and found to be absent.
682 self._missingdircache = set()
682 self._missingdircache = set()
683
683
684 def __call__(self, repo, wctx, f):
684 def __call__(self, repo, wctx, f):
685 if wctx.isinmemory():
685 if wctx.isinmemory():
686 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
686 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
687 return False
687 return False
688
688
689 # Check for path prefixes that exist as unknown files.
689 # Check for path prefixes that exist as unknown files.
690 for p in reversed(list(util.finddirs(f))):
690 for p in reversed(list(util.finddirs(f))):
691 if p in self._missingdircache:
691 if p in self._missingdircache:
692 return
692 return
693 if p in self._unknowndircache:
693 if p in self._unknowndircache:
694 continue
694 continue
695 if repo.wvfs.audit.check(p):
695 if repo.wvfs.audit.check(p):
696 if (repo.wvfs.isfileorlink(p)
696 if (repo.wvfs.isfileorlink(p)
697 and repo.dirstate.normalize(p) not in repo.dirstate):
697 and repo.dirstate.normalize(p) not in repo.dirstate):
698 return p
698 return p
699 if not repo.wvfs.lexists(p):
699 if not repo.wvfs.lexists(p):
700 self._missingdircache.add(p)
700 self._missingdircache.add(p)
701 return
701 return
702 self._unknowndircache.add(p)
702 self._unknowndircache.add(p)
703
703
704 # Check if the file conflicts with a directory containing unknown files.
704 # Check if the file conflicts with a directory containing unknown files.
705 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
705 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
706 # Does the directory contain any files that are not in the dirstate?
706 # Does the directory contain any files that are not in the dirstate?
707 for p, dirs, files in repo.wvfs.walk(f):
707 for p, dirs, files in repo.wvfs.walk(f):
708 for fn in files:
708 for fn in files:
709 relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
709 relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
710 if relf not in repo.dirstate:
710 if relf not in repo.dirstate:
711 return f
711 return f
712 return None
712 return None
713
713
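# Illustration (hypothetical layout, not upstream code): for an incoming
# file 'a/b/c', the checker above tests the prefixes 'a' and then 'a/b'.
# If 'a' exists in the working directory as an untracked file or symlink,
# 'a' is returned as the conflicting path; if instead 'a/b/c' itself is a
# directory containing any file not in the dirstate, 'a/b/c' is returned.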
714 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
714 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
715 """
715 """
716 Considers any actions that care about the presence of conflicting unknown
716 Considers any actions that care about the presence of conflicting unknown
717 files. For some actions, the result is to abort; for others, it is to
717 files. For some actions, the result is to abort; for others, it is to
718 choose a different action.
718 choose a different action.
719 """
719 """
720 fileconflicts = set()
720 fileconflicts = set()
721 pathconflicts = set()
721 pathconflicts = set()
722 warnconflicts = set()
722 warnconflicts = set()
723 abortconflicts = set()
723 abortconflicts = set()
724 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
724 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
725 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
725 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
726 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
726 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
727 if not force:
727 if not force:
728 def collectconflicts(conflicts, config):
728 def collectconflicts(conflicts, config):
729 if config == 'abort':
729 if config == 'abort':
730 abortconflicts.update(conflicts)
730 abortconflicts.update(conflicts)
731 elif config == 'warn':
731 elif config == 'warn':
732 warnconflicts.update(conflicts)
732 warnconflicts.update(conflicts)
733
733
734 checkunknowndirs = _unknowndirschecker()
734 checkunknowndirs = _unknowndirschecker()
735 for f, (m, args, msg) in actions.iteritems():
735 for f, (m, args, msg) in actions.iteritems():
736 if m in ('c', 'dc'):
736 if m in ('c', 'dc'):
737 if _checkunknownfile(repo, wctx, mctx, f):
737 if _checkunknownfile(repo, wctx, mctx, f):
738 fileconflicts.add(f)
738 fileconflicts.add(f)
739 elif pathconfig and f not in wctx:
739 elif pathconfig and f not in wctx:
740 path = checkunknowndirs(repo, wctx, f)
740 path = checkunknowndirs(repo, wctx, f)
741 if path is not None:
741 if path is not None:
742 pathconflicts.add(path)
742 pathconflicts.add(path)
743 elif m == 'dg':
743 elif m == 'dg':
744 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
744 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
745 fileconflicts.add(f)
745 fileconflicts.add(f)
746
746
747 allconflicts = fileconflicts | pathconflicts
747 allconflicts = fileconflicts | pathconflicts
748 ignoredconflicts = set([c for c in allconflicts
748 ignoredconflicts = set([c for c in allconflicts
749 if repo.dirstate._ignore(c)])
749 if repo.dirstate._ignore(c)])
750 unknownconflicts = allconflicts - ignoredconflicts
750 unknownconflicts = allconflicts - ignoredconflicts
751 collectconflicts(ignoredconflicts, ignoredconfig)
751 collectconflicts(ignoredconflicts, ignoredconfig)
752 collectconflicts(unknownconflicts, unknownconfig)
752 collectconflicts(unknownconflicts, unknownconfig)
753 else:
753 else:
754 for f, (m, args, msg) in actions.iteritems():
754 for f, (m, args, msg) in actions.iteritems():
755 if m == 'cm':
755 if m == 'cm':
756 fl2, anc = args
756 fl2, anc = args
757 different = _checkunknownfile(repo, wctx, mctx, f)
757 different = _checkunknownfile(repo, wctx, mctx, f)
758 if repo.dirstate._ignore(f):
758 if repo.dirstate._ignore(f):
759 config = ignoredconfig
759 config = ignoredconfig
760 else:
760 else:
761 config = unknownconfig
761 config = unknownconfig
762
762
763 # The behavior when force is True is described by this table:
763 # The behavior when force is True is described by this table:
764 # config different mergeforce | action backup
764 # config different mergeforce | action backup
765 # * n * | get n
765 # * n * | get n
766 # * y y | merge -
766 # * y y | merge -
767 # abort y n | merge - (1)
767 # abort y n | merge - (1)
768 # warn y n | warn + get y
768 # warn y n | warn + get y
769 # ignore y n | get y
769 # ignore y n | get y
770 #
770 #
771 # (1) this is probably the wrong behavior here -- we should
771 # (1) this is probably the wrong behavior here -- we should
772 # probably abort, but some actions like rebases currently
772 # probably abort, but some actions like rebases currently
773 # don't like an abort happening in the middle of
773 # don't like an abort happening in the middle of
774 # merge.update.
774 # merge.update.
775 if not different:
775 if not different:
776 actions[f] = ('g', (fl2, False), "remote created")
776 actions[f] = ('g', (fl2, False), "remote created")
777 elif mergeforce or config == 'abort':
777 elif mergeforce or config == 'abort':
778 actions[f] = ('m', (f, f, None, False, anc),
778 actions[f] = ('m', (f, f, None, False, anc),
779 "remote differs from untracked local")
779 "remote differs from untracked local")
780 elif config == 'abort':
780 elif config == 'abort':
781 abortconflicts.add(f)
781 abortconflicts.add(f)
782 else:
782 else:
783 if config == 'warn':
783 if config == 'warn':
784 warnconflicts.add(f)
784 warnconflicts.add(f)
785 actions[f] = ('g', (fl2, True), "remote created")
785 actions[f] = ('g', (fl2, True), "remote created")
786
786
787 for f in sorted(abortconflicts):
787 for f in sorted(abortconflicts):
788 warn = repo.ui.warn
788 warn = repo.ui.warn
789 if f in pathconflicts:
789 if f in pathconflicts:
790 if repo.wvfs.isfileorlink(f):
790 if repo.wvfs.isfileorlink(f):
791 warn(_("%s: untracked file conflicts with directory\n") % f)
791 warn(_("%s: untracked file conflicts with directory\n") % f)
792 else:
792 else:
793 warn(_("%s: untracked directory conflicts with file\n") % f)
793 warn(_("%s: untracked directory conflicts with file\n") % f)
794 else:
794 else:
795 warn(_("%s: untracked file differs\n") % f)
795 warn(_("%s: untracked file differs\n") % f)
796 if abortconflicts:
796 if abortconflicts:
797 raise error.Abort(_("untracked files in working directory "
797 raise error.Abort(_("untracked files in working directory "
798 "differ from files in requested revision"))
798 "differ from files in requested revision"))
799
799
800 for f in sorted(warnconflicts):
800 for f in sorted(warnconflicts):
801 if repo.wvfs.isfileorlink(f):
801 if repo.wvfs.isfileorlink(f):
802 repo.ui.warn(_("%s: replacing untracked file\n") % f)
802 repo.ui.warn(_("%s: replacing untracked file\n") % f)
803 else:
803 else:
804 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
804 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
805
805
806 for f, (m, args, msg) in actions.iteritems():
806 for f, (m, args, msg) in actions.iteritems():
807 if m == 'c':
807 if m == 'c':
808 backup = (f in fileconflicts or f in pathconflicts or
808 backup = (f in fileconflicts or f in pathconflicts or
809 any(p in pathconflicts for p in util.finddirs(f)))
809 any(p in pathconflicts for p in util.finddirs(f)))
810 flags, = args
810 flags, = args
811 actions[f] = ('g', (flags, backup), msg)
811 actions[f] = ('g', (flags, backup), msg)
812
812
813 def _forgetremoved(wctx, mctx, branchmerge):
813 def _forgetremoved(wctx, mctx, branchmerge):
814 """
814 """
815 Forget removed files
815 Forget removed files
816
816
817 If we're jumping between revisions (as opposed to merging), and if
817 If we're jumping between revisions (as opposed to merging), and if
818 neither the working directory nor the target rev has the file,
818 neither the working directory nor the target rev has the file,
819 then we need to remove it from the dirstate, to prevent the
819 then we need to remove it from the dirstate, to prevent the
820 dirstate from listing the file when it is no longer in the
820 dirstate from listing the file when it is no longer in the
821 manifest.
821 manifest.
822
822
823 If we're merging, and the other revision has removed a file
823 If we're merging, and the other revision has removed a file
824 that is not present in the working directory, we need to mark it
824 that is not present in the working directory, we need to mark it
825 as removed.
825 as removed.
826 """
826 """
827
827
828 actions = {}
828 actions = {}
829 m = 'f'
829 m = 'f'
830 if branchmerge:
830 if branchmerge:
831 m = 'r'
831 m = 'r'
832 for f in wctx.deleted():
832 for f in wctx.deleted():
833 if f not in mctx:
833 if f not in mctx:
834 actions[f] = m, None, "forget deleted"
834 actions[f] = m, None, "forget deleted"
835
835
836 if not branchmerge:
836 if not branchmerge:
837 for f in wctx.removed():
837 for f in wctx.removed():
838 if f not in mctx:
838 if f not in mctx:
839 actions[f] = 'f', None, "forget removed"
839 actions[f] = 'f', None, "forget removed"
840
840
841 return actions
841 return actions
842
842
def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

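# Illustrative sketch (not upstream code): the collision check above relies
# on util.normcase() folding names the same way the target filesystem would.
# A standalone approximation of the same foldmap idiom, using a crude
# lower-casing stand-in for util.normcase():
#
#   seen = {}
#   for name in ('README', 'readme'):
#       fold = name.lower()   # stand-in for util.normcase()
#       if fold in seen:
#           raise ValueError('case-folding collision: %s vs %s'
#                            % (name, seen[fold]))
#       seen[fold] = name
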
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in util.finddirs(f):
            if p in dirs:
                yield f, p
                break

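# Illustrative note (not upstream code): for a manifest containing b'a/x',
# b'a/y' and b'b/z', a hypothetical call such as
#
#   list(_filesindirs(repo, manifest, {b'a'}))
#
# would yield [(b'a/x', b'a'), (b'a/y', b'a')] -- each matching file paired
# with the directory from ``dirs`` that contains it.
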
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest.  These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in ('c', 'dc', 'm', 'cm'):
            # This action may create a new local file.
            createdfiledirs.update(util.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory.  This might be ok if all
                # the files in the local directory are being deleted.  This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == 'r':
            deletedfiles.add(f)
        if m == 'm':
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == 'dm':
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file.  This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip('+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = ('pr', (p,), "local path conflict")
            actions[p] = ('p', (pnew, 'l'), "path conflict")

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip('+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in ('dc', 'm'):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = ('dg', (p, fl), "remote path conflict")
                actions[p] = ('p', (pnew, 'r'), "path conflict")
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
        raise error.Abort(_("destination manifest contains path conflicts"))

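# Illustrative scenario (not upstream code): if the remote side creates a
# file b'a/b' while the working copy has a file b'a' that is not being
# deleted, the function above turns that into a rename plus a conflict
# marker, roughly:
#
#   actions[pnew] = ('pr', (b'a',), "local path conflict")
#   actions[b'a'] = ('p', (pnew, 'l'), "path conflict")
#
# where pnew is whatever safe name util.safename() picks; the exact renamed
# path is only sketched here.
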
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    return actions, diverge, renamedelete

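# Illustrative note (not upstream code): every entry produced above has the
# shape actions[filename] = (action, args, message).  A hypothetical result
# for a small merge could look like:
#
#   {b'modified-on-both': ('m', (f, f, f, False, pa.node()), "versions differ"),
#    b'only-on-remote': ('c', (fl2,), "remote created"),
#    b'unchanged-remotely': ('k', (), "remote unchanged")}
#
# with f, fl2 and pa standing in for the loop variables used in the function.
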
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
       remained the same."""

    for f, (m, args, msg) in actions.items():
        if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

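# Illustrative note (not upstream code): a 'cd' entry whose working-copy
# content still matches the ancestor is downgraded to a plain remove,
# e.g. ('cd', ...) becomes ('r', None, "prompt same"), while the symmetric
# 'dc' case simply drops the entry so the local deletion is kept.
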
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete

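# Illustrative note (not upstream code): during bid merge, fbids maps each
# filename to the bids collected from the different ancestors, keyed by
# action type.  With two ancestors a hypothetical entry could be:
#
#   fbids[b'foo'] = {'g': [(b'foo', (fl2, False), "remote is newer")],
#                    'k': [(b'foo', (), "remote unchanged")]}
#
# in which case the auction above picks the 'k' (keep) bid for b'foo'.
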
def _getcwd():
    try:
        return pycompat.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise

def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

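# Illustrative note (not upstream code): the ``i == 100`` pattern above
# batches progress updates so the generator yields roughly once per hundred
# processed files instead of once per file.  A stripped-down version of the
# same idiom:
#
#   def _batchprogress(items):
#       i = 0
#       for item in items:
#           if i == 100:
#               yield i, item
#               i = 0
#           i += 1
#       if i > 0:
#           yield i, item
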
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up.  Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                absf = repo.wjoin(f)
                if not repo.wvfs.lexists(f):
                    for p in util.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            absf = repo.wjoin(p)
                            break
                orig = scmutil.origpath(ui, repo, absf)
                if repo.wvfs.lexists(absf):
                    util.rename(absf, orig)
            wctx[f].clearunknown()
            atomictemp = ui.configbool("experimental", "update.atomic-file")
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
                          atomictemp=atomictemp)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

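# Illustrative note (not upstream code): the backup branch above moves an
# in-the-way working-copy file aside before writing.  Conceptually, for an
# incoming file b'a/b' that conflicts with an unknown local file b'a', it
# renames the local b'a' to whatever path scmutil.origpath() returns
# (typically an .orig-style name) and only then writes the new content.
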
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
    of merge actions.  ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in.  'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    oplist = [actions[a] for a in 'g dc dg m'.split()]
    prefetch = scmutil.fileprefetchhooks
    prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])

def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
    z = 0

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions['p']:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions['pr']:
        repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
        f0, = args
        if wctx[f0].lexists():
            repo.ui.note(_("moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # get in parallel
    prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError("in-memory merge does not "
                                                    "support mergedriver")
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            if f == '.hgsubstate': # subrepo states need updating
                subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                     overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

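# Illustrative note (not upstream code): callers typically unpack the return
# value above as
#
#   updated, merged, removed, unresolved = applyupdates(repo, actions, wctx,
#                                                       mctx, overwrite)
#
# and treat a nonzero 'unresolved' count as "conflicts remain to be resolved".
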
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated):
      if this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c -C -n -m dirty rev linear | result
     y  y  *  *    *   *    *    |   (1)
     y  *  y  *    *   *    *    |   (1)
     y  *  *  y    *   *    *    |   (1)
     *  y  y  *    *   *    *    |   (1)
     *  y  *  y    *   *    *    |   (1)
     *  *  y  y    *   *    *    |   (1)
     *  *  *  *    *   n    n    |    x
     *  *  *  *    n   *    *    |   ok
     n  n  n  n    y   *    y    | merge
     n  n  n  n    y   y    n    |   (2)
     n  n  n  y    y   *    *    | merge
     n  n  y  n    y   *    *    | merge if no conflict
     n  y  n  n    y   *    *    | discard
     y  n  n  n    y   *    *    |   (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
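    # Illustrative sketch (not part of the original file): typical call shapes
    # for this function. The mappings below are assumptions about how the
    # built-in commands are generally wired up, not verified call sites:
    #
    #   update(repo, rev, branchmerge=False, force=False)   # plain 'hg update'
    #   update(repo, rev, branchmerge=False, force=True)    # 'hg update --clean'
    #   update(repo, rev, branchmerge=True, force=False)    # 'hg merge'
    #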
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
            if branchmerge:
                if pas == [p2]:
                    raise error.Abort(_("merging with a working directory "
                                        "ancestor has no effect"))
                elif pas == [p1]:
                    if not mergeancestor and wc.branch() == p2.branch():
                        raise error.Abort(_("nothing to merge"),
                                          hint=_("use 'hg update' "
                                                 "or check 'hg heads'"))
                if not force and (wc.files() or wc.deleted()):
                    raise error.Abort(_("uncommitted changes"),
                                      hint=_("use 'hg status' to list changes"))
                if not wc.isinmemory():
                    for s in sorted(wc.substate):
                        wc.sub(s).bailifchanged()

            elif not overwrite:
                if p1 == p2: # no-op update
                    # call the hooks and exit early
                    repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                    repo.hook('update', parent1=xp2, parent2='', error=0)
                    return 0, 0, 0, 0

                if (updatecheck == 'linear' and
                        pas not in ([p1], [p2])): # nonlinear
                    dirty = wc.dirty(missing=True)
                    if dirty:
                        # Branching is a bit strange here to ensure we do the
                        # minimal number of calls to obsutil.foreground.
                        foreground = obsutil.foreground(repo, [p1.node()])
                        # note: the <node> variable contains a random identifier
                        if repo[node].node() in foreground:
                            pass # allow updating to successors
                        else:
                            msg = _("uncommitted changes")
                            hint = _("commit or update --clean to discard changes")
                            raise error.UpdateAbort(msg, hint=hint)
                    else:
                        # Allow jumping branches if clean and specific rev given
                        pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial and not wc.isinmemory():
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only advertise on Linux and MacOS because that's where fsmonitor
        # is considered stable.
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial and not wc.isinmemory():
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
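
# Illustrative sketch (not part of the original file): a hypothetical caller
# of graft(). The revision handling is simplified for illustration; the real
# graft command does considerably more bookkeeping around continue/abort:
#
#   for rev in revs_to_graft:
#       ctx = repo[rev]
#       stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])
#       if stats[3] > 0:   # unresolved files
#           raise error.Abort(_("unresolved conflicts, use 'hg resolve'"))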