merge: cache unknown dir checks (issue5716)...
Mark Thomas
r35181:b8596235 stable
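To keep the path-conflict check affordable when many incoming files live under the same directories, this change replaces the per-file `_checkunknowndirs` helper with a stateful `_unknowndirschecker` object that remembers which directory prefixes it has already examined. The sketch below only illustrates that caching idea; `finddirs`, `root`, and `tracked` are simplified stand-ins (not Mercurial's API) for `util.finddirs`, the working copy, and `repo.dirstate`, and the real checker in the diff below additionally audits paths and handles symlinks.

```python
# Minimal sketch of the caching strategy this patch adds. Names 'finddirs',
# 'root', and 'tracked' are simplified stand-ins, not Mercurial internals.
import os

def finddirs(path):
    """Yield every proper directory prefix of path, deepest first."""
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

class unknowndirschecker(object):
    """Return the shortest prefix of f that exists as an untracked file,
    caching prefixes proven safe and prefixes proven absent so that
    checking many files in the same directories stays cheap."""
    def __init__(self, root, tracked):
        self.root = root
        self.tracked = set(tracked)
        self._unknowndircache = set()   # prefixes already checked and safe
        self._missingdircache = set()   # prefixes already checked and absent

    def __call__(self, f):
        # Walk prefixes from shortest to deepest, mirroring the patch.
        for p in reversed(list(finddirs(f))):
            if p in self._missingdircache:
                return None             # an ancestor is absent: no conflict
            if p in self._unknowndircache:
                continue                # already proven safe
            full = os.path.join(self.root, p)
            if os.path.isfile(full) and p not in self.tracked:
                return p                # untracked file where a dir is needed
            if not os.path.lexists(full):
                self._missingdircache.add(p)
                return None
            self._unknowndircache.add(p)
        return None

# One checker instance is reused across many candidate paths, so prefixes
# such as 'a' and 'a/b' are examined at most once.
checker = unknowndirschecker('/tmp/wc', tracked=['a/b/existing.txt'])
for path in ['a/b/new1.txt', 'a/b/new2.txt', 'a/c/new3.txt']:
    conflict = checker(path)
```

Because prefixes are walked shortest-first, a single missing ancestor recorded in `_missingdircache` short-circuits every deeper path under it on later calls.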
@@ -1,2040 +1,2060 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from . import (
25 from . import (
26 copies,
26 copies,
27 error,
27 error,
28 extensions,
28 extensions,
29 filemerge,
29 filemerge,
30 match as matchmod,
30 match as matchmod,
31 obsutil,
31 obsutil,
32 pycompat,
32 pycompat,
33 scmutil,
33 scmutil,
34 subrepo,
34 subrepo,
35 util,
35 util,
36 worker,
36 worker,
37 )
37 )
38
38
39 _pack = struct.pack
39 _pack = struct.pack
40 _unpack = struct.unpack
40 _unpack = struct.unpack
41
41
42 def _droponode(data):
42 def _droponode(data):
43 # used for compatibility for v1
43 # used for compatibility for v1
44 bits = data.split('\0')
44 bits = data.split('\0')
45 bits = bits[:-2] + bits[-1:]
45 bits = bits[:-2] + bits[-1:]
46 return '\0'.join(bits)
46 return '\0'.join(bits)
47
47
48 class mergestate(object):
48 class mergestate(object):
49 '''track 3-way merge state of individual files
49 '''track 3-way merge state of individual files
50
50
51 The merge state is stored on disk when needed. Two files are used: one with
51 The merge state is stored on disk when needed. Two files are used: one with
52 an old format (version 1), and one with a new format (version 2). Version 2
52 an old format (version 1), and one with a new format (version 2). Version 2
53 stores a superset of the data in version 1, including new kinds of records
53 stores a superset of the data in version 1, including new kinds of records
54 in the future. For more about the new format, see the documentation for
54 in the future. For more about the new format, see the documentation for
55 `_readrecordsv2`.
55 `_readrecordsv2`.
56
56
57 Each record can contain arbitrary content, and has an associated type. This
57 Each record can contain arbitrary content, and has an associated type. This
58 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 `type` should be a letter. If `type` is uppercase, the record is mandatory:
59 versions of Mercurial that don't support it should abort. If `type` is
59 versions of Mercurial that don't support it should abort. If `type` is
60 lowercase, the record can be safely ignored.
60 lowercase, the record can be safely ignored.
61
61
62 Currently known records:
62 Currently known records:
63
63
64 L: the node of the "local" part of the merge (hexified version)
64 L: the node of the "local" part of the merge (hexified version)
65 O: the node of the "other" part of the merge (hexified version)
65 O: the node of the "other" part of the merge (hexified version)
66 F: a file to be merged entry
66 F: a file to be merged entry
67 C: a change/delete or delete/change conflict
67 C: a change/delete or delete/change conflict
68 D: a file that the external merge driver will merge internally
68 D: a file that the external merge driver will merge internally
69 (experimental)
69 (experimental)
70 P: a path conflict (file vs directory)
70 P: a path conflict (file vs directory)
71 m: the external merge driver defined for this merge plus its run state
71 m: the external merge driver defined for this merge plus its run state
72 (experimental)
72 (experimental)
73 f: a (filename, dictionary) tuple of optional values for a given file
73 f: a (filename, dictionary) tuple of optional values for a given file
74 X: unsupported mandatory record type (used in tests)
74 X: unsupported mandatory record type (used in tests)
75 x: unsupported advisory record type (used in tests)
75 x: unsupported advisory record type (used in tests)
76 l: the labels for the parts of the merge.
76 l: the labels for the parts of the merge.
77
77
78 Merge driver run states (experimental):
78 Merge driver run states (experimental):
79 u: driver-resolved files unmarked -- needs to be run next time we're about
79 u: driver-resolved files unmarked -- needs to be run next time we're about
80 to resolve or commit
80 to resolve or commit
81 m: driver-resolved files marked -- only needs to be run before commit
81 m: driver-resolved files marked -- only needs to be run before commit
82 s: success/skipped -- does not need to be run any more
82 s: success/skipped -- does not need to be run any more
83
83
84 Merge record states (stored in self._state, indexed by filename):
84 Merge record states (stored in self._state, indexed by filename):
85 u: unresolved conflict
85 u: unresolved conflict
86 r: resolved conflict
86 r: resolved conflict
87 pu: unresolved path conflict (file conflicts with directory)
87 pu: unresolved path conflict (file conflicts with directory)
88 pr: resolved path conflict
88 pr: resolved path conflict
89 d: driver-resolved conflict
89 d: driver-resolved conflict
90
90
91 The resolve command transitions between 'u' and 'r' for conflicts and
91 The resolve command transitions between 'u' and 'r' for conflicts and
92 'pu' and 'pr' for path conflicts.
92 'pu' and 'pr' for path conflicts.
93 '''
93 '''
94 statepathv1 = 'merge/state'
94 statepathv1 = 'merge/state'
95 statepathv2 = 'merge/state2'
95 statepathv2 = 'merge/state2'
96
96
97 @staticmethod
97 @staticmethod
98 def clean(repo, node=None, other=None, labels=None):
98 def clean(repo, node=None, other=None, labels=None):
99 """Initialize a brand new merge state, removing any existing state on
99 """Initialize a brand new merge state, removing any existing state on
100 disk."""
100 disk."""
101 ms = mergestate(repo)
101 ms = mergestate(repo)
102 ms.reset(node, other, labels)
102 ms.reset(node, other, labels)
103 return ms
103 return ms
104
104
105 @staticmethod
105 @staticmethod
106 def read(repo):
106 def read(repo):
107 """Initialize the merge state, reading it from disk."""
107 """Initialize the merge state, reading it from disk."""
108 ms = mergestate(repo)
108 ms = mergestate(repo)
109 ms._read()
109 ms._read()
110 return ms
110 return ms
111
111
112 def __init__(self, repo):
112 def __init__(self, repo):
113 """Initialize the merge state.
113 """Initialize the merge state.
114
114
115 Do not use this directly! Instead call read() or clean()."""
115 Do not use this directly! Instead call read() or clean()."""
116 self._repo = repo
116 self._repo = repo
117 self._dirty = False
117 self._dirty = False
118 self._labels = None
118 self._labels = None
119
119
120 def reset(self, node=None, other=None, labels=None):
120 def reset(self, node=None, other=None, labels=None):
121 self._state = {}
121 self._state = {}
122 self._stateextras = {}
122 self._stateextras = {}
123 self._local = None
123 self._local = None
124 self._other = None
124 self._other = None
125 self._labels = labels
125 self._labels = labels
126 for var in ('localctx', 'otherctx'):
126 for var in ('localctx', 'otherctx'):
127 if var in vars(self):
127 if var in vars(self):
128 delattr(self, var)
128 delattr(self, var)
129 if node:
129 if node:
130 self._local = node
130 self._local = node
131 self._other = other
131 self._other = other
132 self._readmergedriver = None
132 self._readmergedriver = None
133 if self.mergedriver:
133 if self.mergedriver:
134 self._mdstate = 's'
134 self._mdstate = 's'
135 else:
135 else:
136 self._mdstate = 'u'
136 self._mdstate = 'u'
137 shutil.rmtree(self._repo.vfs.join('merge'), True)
137 shutil.rmtree(self._repo.vfs.join('merge'), True)
138 self._results = {}
138 self._results = {}
139 self._dirty = False
139 self._dirty = False
140
140
141 def _read(self):
141 def _read(self):
142 """Analyse each record content to restore a serialized state from disk
142 """Analyse each record content to restore a serialized state from disk
143
143
144 This function process "record" entry produced by the de-serialization
144 This function process "record" entry produced by the de-serialization
145 of on disk file.
145 of on disk file.
146 """
146 """
147 self._state = {}
147 self._state = {}
148 self._stateextras = {}
148 self._stateextras = {}
149 self._local = None
149 self._local = None
150 self._other = None
150 self._other = None
151 for var in ('localctx', 'otherctx'):
151 for var in ('localctx', 'otherctx'):
152 if var in vars(self):
152 if var in vars(self):
153 delattr(self, var)
153 delattr(self, var)
154 self._readmergedriver = None
154 self._readmergedriver = None
155 self._mdstate = 's'
155 self._mdstate = 's'
156 unsupported = set()
156 unsupported = set()
157 records = self._readrecords()
157 records = self._readrecords()
158 for rtype, record in records:
158 for rtype, record in records:
159 if rtype == 'L':
159 if rtype == 'L':
160 self._local = bin(record)
160 self._local = bin(record)
161 elif rtype == 'O':
161 elif rtype == 'O':
162 self._other = bin(record)
162 self._other = bin(record)
163 elif rtype == 'm':
163 elif rtype == 'm':
164 bits = record.split('\0', 1)
164 bits = record.split('\0', 1)
165 mdstate = bits[1]
165 mdstate = bits[1]
166 if len(mdstate) != 1 or mdstate not in 'ums':
166 if len(mdstate) != 1 or mdstate not in 'ums':
167 # the merge driver should be idempotent, so just rerun it
167 # the merge driver should be idempotent, so just rerun it
168 mdstate = 'u'
168 mdstate = 'u'
169
169
170 self._readmergedriver = bits[0]
170 self._readmergedriver = bits[0]
171 self._mdstate = mdstate
171 self._mdstate = mdstate
172 elif rtype in 'FDCP':
172 elif rtype in 'FDCP':
173 bits = record.split('\0')
173 bits = record.split('\0')
174 self._state[bits[0]] = bits[1:]
174 self._state[bits[0]] = bits[1:]
175 elif rtype == 'f':
175 elif rtype == 'f':
176 filename, rawextras = record.split('\0', 1)
176 filename, rawextras = record.split('\0', 1)
177 extraparts = rawextras.split('\0')
177 extraparts = rawextras.split('\0')
178 extras = {}
178 extras = {}
179 i = 0
179 i = 0
180 while i < len(extraparts):
180 while i < len(extraparts):
181 extras[extraparts[i]] = extraparts[i + 1]
181 extras[extraparts[i]] = extraparts[i + 1]
182 i += 2
182 i += 2
183
183
184 self._stateextras[filename] = extras
184 self._stateextras[filename] = extras
185 elif rtype == 'l':
185 elif rtype == 'l':
186 labels = record.split('\0', 2)
186 labels = record.split('\0', 2)
187 self._labels = [l for l in labels if len(l) > 0]
187 self._labels = [l for l in labels if len(l) > 0]
188 elif not rtype.islower():
188 elif not rtype.islower():
189 unsupported.add(rtype)
189 unsupported.add(rtype)
190 self._results = {}
190 self._results = {}
191 self._dirty = False
191 self._dirty = False
192
192
193 if unsupported:
193 if unsupported:
194 raise error.UnsupportedMergeRecords(unsupported)
194 raise error.UnsupportedMergeRecords(unsupported)
195
195
196 def _readrecords(self):
196 def _readrecords(self):
197 """Read merge state from disk and return a list of record (TYPE, data)
197 """Read merge state from disk and return a list of record (TYPE, data)
198
198
199 We read data from both v1 and v2 files and decide which one to use.
199 We read data from both v1 and v2 files and decide which one to use.
200
200
201 V1 has been used by version prior to 2.9.1 and contains less data than
201 V1 has been used by version prior to 2.9.1 and contains less data than
202 v2. We read both versions and check if no data in v2 contradicts
202 v2. We read both versions and check if no data in v2 contradicts
203 v1. If there is not contradiction we can safely assume that both v1
203 v1. If there is not contradiction we can safely assume that both v1
204 and v2 were written at the same time and use the extract data in v2. If
204 and v2 were written at the same time and use the extract data in v2. If
205 there is contradiction we ignore v2 content as we assume an old version
205 there is contradiction we ignore v2 content as we assume an old version
206 of Mercurial has overwritten the mergestate file and left an old v2
206 of Mercurial has overwritten the mergestate file and left an old v2
207 file around.
207 file around.
208
208
209 returns list of record [(TYPE, data), ...]"""
209 returns list of record [(TYPE, data), ...]"""
210 v1records = self._readrecordsv1()
210 v1records = self._readrecordsv1()
211 v2records = self._readrecordsv2()
211 v2records = self._readrecordsv2()
212 if self._v1v2match(v1records, v2records):
212 if self._v1v2match(v1records, v2records):
213 return v2records
213 return v2records
214 else:
214 else:
215 # v1 file is newer than v2 file, use it
215 # v1 file is newer than v2 file, use it
216 # we have to infer the "other" changeset of the merge
216 # we have to infer the "other" changeset of the merge
217 # we cannot do better than that with v1 of the format
217 # we cannot do better than that with v1 of the format
218 mctx = self._repo[None].parents()[-1]
218 mctx = self._repo[None].parents()[-1]
219 v1records.append(('O', mctx.hex()))
219 v1records.append(('O', mctx.hex()))
220 # add place holder "other" file node information
220 # add place holder "other" file node information
221 # nobody is using it yet so we do no need to fetch the data
221 # nobody is using it yet so we do no need to fetch the data
222 # if mctx was wrong `mctx[bits[-2]]` may fails.
222 # if mctx was wrong `mctx[bits[-2]]` may fails.
223 for idx, r in enumerate(v1records):
223 for idx, r in enumerate(v1records):
224 if r[0] == 'F':
224 if r[0] == 'F':
225 bits = r[1].split('\0')
225 bits = r[1].split('\0')
226 bits.insert(-2, '')
226 bits.insert(-2, '')
227 v1records[idx] = (r[0], '\0'.join(bits))
227 v1records[idx] = (r[0], '\0'.join(bits))
228 return v1records
228 return v1records
229
229
230 def _v1v2match(self, v1records, v2records):
230 def _v1v2match(self, v1records, v2records):
231 oldv2 = set() # old format version of v2 record
231 oldv2 = set() # old format version of v2 record
232 for rec in v2records:
232 for rec in v2records:
233 if rec[0] == 'L':
233 if rec[0] == 'L':
234 oldv2.add(rec)
234 oldv2.add(rec)
235 elif rec[0] == 'F':
235 elif rec[0] == 'F':
236 # drop the onode data (not contained in v1)
236 # drop the onode data (not contained in v1)
237 oldv2.add(('F', _droponode(rec[1])))
237 oldv2.add(('F', _droponode(rec[1])))
238 for rec in v1records:
238 for rec in v1records:
239 if rec not in oldv2:
239 if rec not in oldv2:
240 return False
240 return False
241 else:
241 else:
242 return True
242 return True
243
243
244 def _readrecordsv1(self):
244 def _readrecordsv1(self):
245 """read on disk merge state for version 1 file
245 """read on disk merge state for version 1 file
246
246
247 returns list of record [(TYPE, data), ...]
247 returns list of record [(TYPE, data), ...]
248
248
249 Note: the "F" data from this file are one entry short
249 Note: the "F" data from this file are one entry short
250 (no "other file node" entry)
250 (no "other file node" entry)
251 """
251 """
252 records = []
252 records = []
253 try:
253 try:
254 f = self._repo.vfs(self.statepathv1)
254 f = self._repo.vfs(self.statepathv1)
255 for i, l in enumerate(f):
255 for i, l in enumerate(f):
256 if i == 0:
256 if i == 0:
257 records.append(('L', l[:-1]))
257 records.append(('L', l[:-1]))
258 else:
258 else:
259 records.append(('F', l[:-1]))
259 records.append(('F', l[:-1]))
260 f.close()
260 f.close()
261 except IOError as err:
261 except IOError as err:
262 if err.errno != errno.ENOENT:
262 if err.errno != errno.ENOENT:
263 raise
263 raise
264 return records
264 return records
265
265
266 def _readrecordsv2(self):
266 def _readrecordsv2(self):
267 """read on disk merge state for version 2 file
267 """read on disk merge state for version 2 file
268
268
269 This format is a list of arbitrary records of the form:
269 This format is a list of arbitrary records of the form:
270
270
271 [type][length][content]
271 [type][length][content]
272
272
273 `type` is a single character, `length` is a 4 byte integer, and
273 `type` is a single character, `length` is a 4 byte integer, and
274 `content` is an arbitrary byte sequence of length `length`.
274 `content` is an arbitrary byte sequence of length `length`.
275
275
276 Mercurial versions prior to 3.7 have a bug where if there are
276 Mercurial versions prior to 3.7 have a bug where if there are
277 unsupported mandatory merge records, attempting to clear out the merge
277 unsupported mandatory merge records, attempting to clear out the merge
278 state with hg update --clean or similar aborts. The 't' record type
278 state with hg update --clean or similar aborts. The 't' record type
279 works around that by writing out what those versions treat as an
279 works around that by writing out what those versions treat as an
280 advisory record, but later versions interpret as special: the first
280 advisory record, but later versions interpret as special: the first
281 character is the 'real' record type and everything onwards is the data.
281 character is the 'real' record type and everything onwards is the data.
282
282
283 Returns list of records [(TYPE, data), ...]."""
283 Returns list of records [(TYPE, data), ...]."""
284 records = []
284 records = []
285 try:
285 try:
286 f = self._repo.vfs(self.statepathv2)
286 f = self._repo.vfs(self.statepathv2)
287 data = f.read()
287 data = f.read()
288 off = 0
288 off = 0
289 end = len(data)
289 end = len(data)
290 while off < end:
290 while off < end:
291 rtype = data[off]
291 rtype = data[off]
292 off += 1
292 off += 1
293 length = _unpack('>I', data[off:(off + 4)])[0]
293 length = _unpack('>I', data[off:(off + 4)])[0]
294 off += 4
294 off += 4
295 record = data[off:(off + length)]
295 record = data[off:(off + length)]
296 off += length
296 off += length
297 if rtype == 't':
297 if rtype == 't':
298 rtype, record = record[0], record[1:]
298 rtype, record = record[0], record[1:]
299 records.append((rtype, record))
299 records.append((rtype, record))
300 f.close()
300 f.close()
301 except IOError as err:
301 except IOError as err:
302 if err.errno != errno.ENOENT:
302 if err.errno != errno.ENOENT:
303 raise
303 raise
304 return records
304 return records
305
305
306 @util.propertycache
306 @util.propertycache
307 def mergedriver(self):
307 def mergedriver(self):
308 # protect against the following:
308 # protect against the following:
309 # - A configures a malicious merge driver in their hgrc, then
309 # - A configures a malicious merge driver in their hgrc, then
310 # pauses the merge
310 # pauses the merge
311 # - A edits their hgrc to remove references to the merge driver
311 # - A edits their hgrc to remove references to the merge driver
312 # - A gives a copy of their entire repo, including .hg, to B
312 # - A gives a copy of their entire repo, including .hg, to B
313 # - B inspects .hgrc and finds it to be clean
313 # - B inspects .hgrc and finds it to be clean
314 # - B then continues the merge and the malicious merge driver
314 # - B then continues the merge and the malicious merge driver
315 # gets invoked
315 # gets invoked
316 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
316 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
317 if (self._readmergedriver is not None
317 if (self._readmergedriver is not None
318 and self._readmergedriver != configmergedriver):
318 and self._readmergedriver != configmergedriver):
319 raise error.ConfigError(
319 raise error.ConfigError(
320 _("merge driver changed since merge started"),
320 _("merge driver changed since merge started"),
321 hint=_("revert merge driver change or abort merge"))
321 hint=_("revert merge driver change or abort merge"))
322
322
323 return configmergedriver
323 return configmergedriver
324
324
325 @util.propertycache
325 @util.propertycache
326 def localctx(self):
326 def localctx(self):
327 if self._local is None:
327 if self._local is None:
328 msg = "localctx accessed but self._local isn't set"
328 msg = "localctx accessed but self._local isn't set"
329 raise error.ProgrammingError(msg)
329 raise error.ProgrammingError(msg)
330 return self._repo[self._local]
330 return self._repo[self._local]
331
331
332 @util.propertycache
332 @util.propertycache
333 def otherctx(self):
333 def otherctx(self):
334 if self._other is None:
334 if self._other is None:
335 msg = "otherctx accessed but self._other isn't set"
335 msg = "otherctx accessed but self._other isn't set"
336 raise error.ProgrammingError(msg)
336 raise error.ProgrammingError(msg)
337 return self._repo[self._other]
337 return self._repo[self._other]
338
338
339 def active(self):
339 def active(self):
340 """Whether mergestate is active.
340 """Whether mergestate is active.
341
341
342 Returns True if there appears to be mergestate. This is a rough proxy
342 Returns True if there appears to be mergestate. This is a rough proxy
343 for "is a merge in progress."
343 for "is a merge in progress."
344 """
344 """
345 # Check local variables before looking at filesystem for performance
345 # Check local variables before looking at filesystem for performance
346 # reasons.
346 # reasons.
347 return bool(self._local) or bool(self._state) or \
347 return bool(self._local) or bool(self._state) or \
348 self._repo.vfs.exists(self.statepathv1) or \
348 self._repo.vfs.exists(self.statepathv1) or \
349 self._repo.vfs.exists(self.statepathv2)
349 self._repo.vfs.exists(self.statepathv2)
350
350
351 def commit(self):
351 def commit(self):
352 """Write current state on disk (if necessary)"""
352 """Write current state on disk (if necessary)"""
353 if self._dirty:
353 if self._dirty:
354 records = self._makerecords()
354 records = self._makerecords()
355 self._writerecords(records)
355 self._writerecords(records)
356 self._dirty = False
356 self._dirty = False
357
357
358 def _makerecords(self):
358 def _makerecords(self):
359 records = []
359 records = []
360 records.append(('L', hex(self._local)))
360 records.append(('L', hex(self._local)))
361 records.append(('O', hex(self._other)))
361 records.append(('O', hex(self._other)))
362 if self.mergedriver:
362 if self.mergedriver:
363 records.append(('m', '\0'.join([
363 records.append(('m', '\0'.join([
364 self.mergedriver, self._mdstate])))
364 self.mergedriver, self._mdstate])))
365 # Write out state items. In all cases, the value of the state map entry
365 # Write out state items. In all cases, the value of the state map entry
366 # is written as the contents of the record. The record type depends on
366 # is written as the contents of the record. The record type depends on
367 # the type of state that is stored, and capital-letter records are used
367 # the type of state that is stored, and capital-letter records are used
368 # to prevent older versions of Mercurial that do not support the feature
368 # to prevent older versions of Mercurial that do not support the feature
369 # from loading them.
369 # from loading them.
370 for filename, v in self._state.iteritems():
370 for filename, v in self._state.iteritems():
371 if v[0] == 'd':
371 if v[0] == 'd':
372 # Driver-resolved merge. These are stored in 'D' records.
372 # Driver-resolved merge. These are stored in 'D' records.
373 records.append(('D', '\0'.join([filename] + v)))
373 records.append(('D', '\0'.join([filename] + v)))
374 elif v[0] in ('pu', 'pr'):
374 elif v[0] in ('pu', 'pr'):
375 # Path conflicts. These are stored in 'P' records. The current
375 # Path conflicts. These are stored in 'P' records. The current
376 # resolution state ('pu' or 'pr') is stored within the record.
376 # resolution state ('pu' or 'pr') is stored within the record.
377 records.append(('P', '\0'.join([filename] + v)))
377 records.append(('P', '\0'.join([filename] + v)))
378 elif v[1] == nullhex or v[6] == nullhex:
378 elif v[1] == nullhex or v[6] == nullhex:
379 # Change/Delete or Delete/Change conflicts. These are stored in
379 # Change/Delete or Delete/Change conflicts. These are stored in
380 # 'C' records. v[1] is the local file, and is nullhex when the
380 # 'C' records. v[1] is the local file, and is nullhex when the
381 # file is deleted locally ('dc'). v[6] is the remote file, and
381 # file is deleted locally ('dc'). v[6] is the remote file, and
382 # is nullhex when the file is deleted remotely ('cd').
382 # is nullhex when the file is deleted remotely ('cd').
383 records.append(('C', '\0'.join([filename] + v)))
383 records.append(('C', '\0'.join([filename] + v)))
384 else:
384 else:
385 # Normal files. These are stored in 'F' records.
385 # Normal files. These are stored in 'F' records.
386 records.append(('F', '\0'.join([filename] + v)))
386 records.append(('F', '\0'.join([filename] + v)))
387 for filename, extras in sorted(self._stateextras.iteritems()):
387 for filename, extras in sorted(self._stateextras.iteritems()):
388 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
388 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
389 extras.iteritems())
389 extras.iteritems())
390 records.append(('f', '%s\0%s' % (filename, rawextras)))
390 records.append(('f', '%s\0%s' % (filename, rawextras)))
391 if self._labels is not None:
391 if self._labels is not None:
392 labels = '\0'.join(self._labels)
392 labels = '\0'.join(self._labels)
393 records.append(('l', labels))
393 records.append(('l', labels))
394 return records
394 return records
395
395
396 def _writerecords(self, records):
396 def _writerecords(self, records):
397 """Write current state on disk (both v1 and v2)"""
397 """Write current state on disk (both v1 and v2)"""
398 self._writerecordsv1(records)
398 self._writerecordsv1(records)
399 self._writerecordsv2(records)
399 self._writerecordsv2(records)
400
400
401 def _writerecordsv1(self, records):
401 def _writerecordsv1(self, records):
402 """Write current state on disk in a version 1 file"""
402 """Write current state on disk in a version 1 file"""
403 f = self._repo.vfs(self.statepathv1, 'w')
403 f = self._repo.vfs(self.statepathv1, 'w')
404 irecords = iter(records)
404 irecords = iter(records)
405 lrecords = next(irecords)
405 lrecords = next(irecords)
406 assert lrecords[0] == 'L'
406 assert lrecords[0] == 'L'
407 f.write(hex(self._local) + '\n')
407 f.write(hex(self._local) + '\n')
408 for rtype, data in irecords:
408 for rtype, data in irecords:
409 if rtype == 'F':
409 if rtype == 'F':
410 f.write('%s\n' % _droponode(data))
410 f.write('%s\n' % _droponode(data))
411 f.close()
411 f.close()
412
412
413 def _writerecordsv2(self, records):
413 def _writerecordsv2(self, records):
414 """Write current state on disk in a version 2 file
414 """Write current state on disk in a version 2 file
415
415
416 See the docstring for _readrecordsv2 for why we use 't'."""
416 See the docstring for _readrecordsv2 for why we use 't'."""
417 # these are the records that all version 2 clients can read
417 # these are the records that all version 2 clients can read
418 whitelist = 'LOF'
418 whitelist = 'LOF'
419 f = self._repo.vfs(self.statepathv2, 'w')
419 f = self._repo.vfs(self.statepathv2, 'w')
420 for key, data in records:
420 for key, data in records:
421 assert len(key) == 1
421 assert len(key) == 1
422 if key not in whitelist:
422 if key not in whitelist:
423 key, data = 't', '%s%s' % (key, data)
423 key, data = 't', '%s%s' % (key, data)
424 format = '>sI%is' % len(data)
424 format = '>sI%is' % len(data)
425 f.write(_pack(format, key, len(data), data))
425 f.write(_pack(format, key, len(data), data))
426 f.close()
426 f.close()
427
427
428 def add(self, fcl, fco, fca, fd):
428 def add(self, fcl, fco, fca, fd):
429 """add a new (potentially?) conflicting file the merge state
429 """add a new (potentially?) conflicting file the merge state
430 fcl: file context for local,
430 fcl: file context for local,
431 fco: file context for remote,
431 fco: file context for remote,
432 fca: file context for ancestors,
432 fca: file context for ancestors,
433 fd: file path of the resulting merge.
433 fd: file path of the resulting merge.
434
434
435 note: also write the local version to the `.hg/merge` directory.
435 note: also write the local version to the `.hg/merge` directory.
436 """
436 """
437 if fcl.isabsent():
437 if fcl.isabsent():
438 hash = nullhex
438 hash = nullhex
439 else:
439 else:
440 hash = hex(hashlib.sha1(fcl.path()).digest())
440 hash = hex(hashlib.sha1(fcl.path()).digest())
441 self._repo.vfs.write('merge/' + hash, fcl.data())
441 self._repo.vfs.write('merge/' + hash, fcl.data())
442 self._state[fd] = ['u', hash, fcl.path(),
442 self._state[fd] = ['u', hash, fcl.path(),
443 fca.path(), hex(fca.filenode()),
443 fca.path(), hex(fca.filenode()),
444 fco.path(), hex(fco.filenode()),
444 fco.path(), hex(fco.filenode()),
445 fcl.flags()]
445 fcl.flags()]
446 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
446 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
447 self._dirty = True
447 self._dirty = True
448
448
449 def addpath(self, path, frename, forigin):
449 def addpath(self, path, frename, forigin):
450 """add a new conflicting path to the merge state
450 """add a new conflicting path to the merge state
451 path: the path that conflicts
451 path: the path that conflicts
452 frename: the filename the conflicting file was renamed to
452 frename: the filename the conflicting file was renamed to
453 forigin: origin of the file ('l' or 'r' for local/remote)
453 forigin: origin of the file ('l' or 'r' for local/remote)
454 """
454 """
455 self._state[path] = ['pu', frename, forigin]
455 self._state[path] = ['pu', frename, forigin]
456 self._dirty = True
456 self._dirty = True
457
457
458 def __contains__(self, dfile):
458 def __contains__(self, dfile):
459 return dfile in self._state
459 return dfile in self._state
460
460
461 def __getitem__(self, dfile):
461 def __getitem__(self, dfile):
462 return self._state[dfile][0]
462 return self._state[dfile][0]
463
463
464 def __iter__(self):
464 def __iter__(self):
465 return iter(sorted(self._state))
465 return iter(sorted(self._state))
466
466
467 def files(self):
467 def files(self):
468 return self._state.keys()
468 return self._state.keys()
469
469
470 def mark(self, dfile, state):
470 def mark(self, dfile, state):
471 self._state[dfile][0] = state
471 self._state[dfile][0] = state
472 self._dirty = True
472 self._dirty = True
473
473
474 def mdstate(self):
474 def mdstate(self):
475 return self._mdstate
475 return self._mdstate
476
476
477 def unresolved(self):
477 def unresolved(self):
478 """Obtain the paths of unresolved files."""
478 """Obtain the paths of unresolved files."""
479
479
480 for f, entry in self._state.iteritems():
480 for f, entry in self._state.iteritems():
481 if entry[0] in ('u', 'pu'):
481 if entry[0] in ('u', 'pu'):
482 yield f
482 yield f
483
483
484 def driverresolved(self):
484 def driverresolved(self):
485 """Obtain the paths of driver-resolved files."""
485 """Obtain the paths of driver-resolved files."""
486
486
487 for f, entry in self._state.items():
487 for f, entry in self._state.items():
488 if entry[0] == 'd':
488 if entry[0] == 'd':
489 yield f
489 yield f
490
490
491 def extras(self, filename):
491 def extras(self, filename):
492 return self._stateextras.setdefault(filename, {})
492 return self._stateextras.setdefault(filename, {})
493
493
494 def _resolve(self, preresolve, dfile, wctx):
494 def _resolve(self, preresolve, dfile, wctx):
495 """rerun merge process for file path `dfile`"""
495 """rerun merge process for file path `dfile`"""
496 if self[dfile] in 'rd':
496 if self[dfile] in 'rd':
497 return True, 0
497 return True, 0
498 stateentry = self._state[dfile]
498 stateentry = self._state[dfile]
499 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
499 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
500 octx = self._repo[self._other]
500 octx = self._repo[self._other]
501 extras = self.extras(dfile)
501 extras = self.extras(dfile)
502 anccommitnode = extras.get('ancestorlinknode')
502 anccommitnode = extras.get('ancestorlinknode')
503 if anccommitnode:
503 if anccommitnode:
504 actx = self._repo[anccommitnode]
504 actx = self._repo[anccommitnode]
505 else:
505 else:
506 actx = None
506 actx = None
507 fcd = self._filectxorabsent(hash, wctx, dfile)
507 fcd = self._filectxorabsent(hash, wctx, dfile)
508 fco = self._filectxorabsent(onode, octx, ofile)
508 fco = self._filectxorabsent(onode, octx, ofile)
509 # TODO: move this to filectxorabsent
509 # TODO: move this to filectxorabsent
510 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
510 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
511 # "premerge" x flags
511 # "premerge" x flags
512 flo = fco.flags()
512 flo = fco.flags()
513 fla = fca.flags()
513 fla = fca.flags()
514 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
514 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
515 if fca.node() == nullid and flags != flo:
515 if fca.node() == nullid and flags != flo:
516 if preresolve:
516 if preresolve:
517 self._repo.ui.warn(
517 self._repo.ui.warn(
518 _('warning: cannot merge flags for %s '
518 _('warning: cannot merge flags for %s '
519 'without common ancestor - keeping local flags\n')
519 'without common ancestor - keeping local flags\n')
520 % afile)
520 % afile)
521 elif flags == fla:
521 elif flags == fla:
522 flags = flo
522 flags = flo
523 if preresolve:
523 if preresolve:
524 # restore local
524 # restore local
525 if hash != nullhex:
525 if hash != nullhex:
526 f = self._repo.vfs('merge/' + hash)
526 f = self._repo.vfs('merge/' + hash)
527 wctx[dfile].write(f.read(), flags)
527 wctx[dfile].write(f.read(), flags)
528 f.close()
528 f.close()
529 else:
529 else:
530 wctx[dfile].remove(ignoremissing=True)
530 wctx[dfile].remove(ignoremissing=True)
531 complete, r, deleted = filemerge.premerge(self._repo, wctx,
531 complete, r, deleted = filemerge.premerge(self._repo, wctx,
532 self._local, lfile, fcd,
532 self._local, lfile, fcd,
533 fco, fca,
533 fco, fca,
534 labels=self._labels)
534 labels=self._labels)
535 else:
535 else:
536 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
536 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
537 self._local, lfile, fcd,
537 self._local, lfile, fcd,
538 fco, fca,
538 fco, fca,
539 labels=self._labels)
539 labels=self._labels)
540 if r is None:
540 if r is None:
541 # no real conflict
541 # no real conflict
542 del self._state[dfile]
542 del self._state[dfile]
543 self._stateextras.pop(dfile, None)
543 self._stateextras.pop(dfile, None)
544 self._dirty = True
544 self._dirty = True
545 elif not r:
545 elif not r:
546 self.mark(dfile, 'r')
546 self.mark(dfile, 'r')
547
547
548 if complete:
548 if complete:
549 action = None
549 action = None
550 if deleted:
550 if deleted:
551 if fcd.isabsent():
551 if fcd.isabsent():
552 # dc: local picked. Need to drop if present, which may
552 # dc: local picked. Need to drop if present, which may
553 # happen on re-resolves.
553 # happen on re-resolves.
554 action = 'f'
554 action = 'f'
555 else:
555 else:
556 # cd: remote picked (or otherwise deleted)
556 # cd: remote picked (or otherwise deleted)
557 action = 'r'
557 action = 'r'
558 else:
558 else:
559 if fcd.isabsent(): # dc: remote picked
559 if fcd.isabsent(): # dc: remote picked
560 action = 'g'
560 action = 'g'
561 elif fco.isabsent(): # cd: local picked
561 elif fco.isabsent(): # cd: local picked
562 if dfile in self.localctx:
562 if dfile in self.localctx:
563 action = 'am'
563 action = 'am'
564 else:
564 else:
565 action = 'a'
565 action = 'a'
566 # else: regular merges (no action necessary)
566 # else: regular merges (no action necessary)
567 self._results[dfile] = r, action
567 self._results[dfile] = r, action
568
568
569 return complete, r
569 return complete, r
570
570
571 def _filectxorabsent(self, hexnode, ctx, f):
571 def _filectxorabsent(self, hexnode, ctx, f):
572 if hexnode == nullhex:
572 if hexnode == nullhex:
573 return filemerge.absentfilectx(ctx, f)
573 return filemerge.absentfilectx(ctx, f)
574 else:
574 else:
575 return ctx[f]
575 return ctx[f]
576
576
577 def preresolve(self, dfile, wctx):
577 def preresolve(self, dfile, wctx):
578 """run premerge process for dfile
578 """run premerge process for dfile
579
579
580 Returns whether the merge is complete, and the exit code."""
580 Returns whether the merge is complete, and the exit code."""
581 return self._resolve(True, dfile, wctx)
581 return self._resolve(True, dfile, wctx)
582
582
583 def resolve(self, dfile, wctx):
583 def resolve(self, dfile, wctx):
584 """run merge process (assuming premerge was run) for dfile
584 """run merge process (assuming premerge was run) for dfile
585
585
586 Returns the exit code of the merge."""
586 Returns the exit code of the merge."""
587 return self._resolve(False, dfile, wctx)[1]
587 return self._resolve(False, dfile, wctx)[1]
588
588
589 def counts(self):
589 def counts(self):
590 """return counts for updated, merged and removed files in this
590 """return counts for updated, merged and removed files in this
591 session"""
591 session"""
592 updated, merged, removed = 0, 0, 0
592 updated, merged, removed = 0, 0, 0
593 for r, action in self._results.itervalues():
593 for r, action in self._results.itervalues():
594 if r is None:
594 if r is None:
595 updated += 1
595 updated += 1
596 elif r == 0:
596 elif r == 0:
597 if action == 'r':
597 if action == 'r':
598 removed += 1
598 removed += 1
599 else:
599 else:
600 merged += 1
600 merged += 1
601 return updated, merged, removed
601 return updated, merged, removed
602
602
603 def unresolvedcount(self):
603 def unresolvedcount(self):
604 """get unresolved count for this merge (persistent)"""
604 """get unresolved count for this merge (persistent)"""
605 return len(list(self.unresolved()))
605 return len(list(self.unresolved()))
606
606
607 def actions(self):
607 def actions(self):
608 """return lists of actions to perform on the dirstate"""
608 """return lists of actions to perform on the dirstate"""
609 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
609 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
610 for f, (r, action) in self._results.iteritems():
610 for f, (r, action) in self._results.iteritems():
611 if action is not None:
611 if action is not None:
612 actions[action].append((f, None, "merge result"))
612 actions[action].append((f, None, "merge result"))
613 return actions
613 return actions
614
614
615 def recordactions(self):
615 def recordactions(self):
616 """record remove/add/get actions in the dirstate"""
616 """record remove/add/get actions in the dirstate"""
617 branchmerge = self._repo.dirstate.p2() != nullid
617 branchmerge = self._repo.dirstate.p2() != nullid
618 recordupdates(self._repo, self.actions(), branchmerge)
618 recordupdates(self._repo, self.actions(), branchmerge)
619
619
620 def queueremove(self, f):
620 def queueremove(self, f):
621 """queues a file to be removed from the dirstate
621 """queues a file to be removed from the dirstate
622
622
623 Meant for use by custom merge drivers."""
623 Meant for use by custom merge drivers."""
624 self._results[f] = 0, 'r'
624 self._results[f] = 0, 'r'
625
625
626 def queueadd(self, f):
626 def queueadd(self, f):
627 """queues a file to be added to the dirstate
627 """queues a file to be added to the dirstate
628
628
629 Meant for use by custom merge drivers."""
629 Meant for use by custom merge drivers."""
630 self._results[f] = 0, 'a'
630 self._results[f] = 0, 'a'
631
631
632 def queueget(self, f):
632 def queueget(self, f):
633 """queues a file to be marked modified in the dirstate
633 """queues a file to be marked modified in the dirstate
634
634
635 Meant for use by custom merge drivers."""
635 Meant for use by custom merge drivers."""
636 self._results[f] = 0, 'g'
636 self._results[f] = 0, 'g'
637
637
638 def _getcheckunknownconfig(repo, section, name):
638 def _getcheckunknownconfig(repo, section, name):
639 config = repo.ui.config(section, name)
639 config = repo.ui.config(section, name)
640 valid = ['abort', 'ignore', 'warn']
640 valid = ['abort', 'ignore', 'warn']
641 if config not in valid:
641 if config not in valid:
642 validstr = ', '.join(["'" + v + "'" for v in valid])
642 validstr = ', '.join(["'" + v + "'" for v in valid])
643 raise error.ConfigError(_("%s.%s not valid "
643 raise error.ConfigError(_("%s.%s not valid "
644 "('%s' is none of %s)")
644 "('%s' is none of %s)")
645 % (section, name, config, validstr))
645 % (section, name, config, validstr))
646 return config
646 return config
647
647
648 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
648 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
649 if f2 is None:
649 if f2 is None:
650 f2 = f
650 f2 = f
651 return (repo.wvfs.audit.check(f)
651 return (repo.wvfs.audit.check(f)
652 and repo.wvfs.isfileorlink(f)
652 and repo.wvfs.isfileorlink(f)
653 and repo.dirstate.normalize(f) not in repo.dirstate
653 and repo.dirstate.normalize(f) not in repo.dirstate
654 and mctx[f2].cmp(wctx[f]))
654 and mctx[f2].cmp(wctx[f]))
655
655
-def _checkunknowndirs(repo, f):
+class _unknowndirschecker(object):
     """
     Look for any unknown files or directories that may have a path conflict
     with a file. If any path prefix of the file exists as a file or link,
     then it conflicts. If the file itself is a directory that contains any
     file that is not tracked, then it conflicts.
 
     Returns the shortest path at which a conflict occurs, or None if there is
     no conflict.
     """
+    def __init__(self):
+        # A set of paths known to be good. This prevents repeated checking of
+        # dirs. It will be updated with any new dirs that are checked and found
+        # to be safe.
+        self._unknowndircache = set()
 
-    # Check for path prefixes that exist as unknown files.
-    for p in reversed(list(util.finddirs(f))):
-        if (repo.wvfs.audit.check(p)
-            and repo.wvfs.isfileorlink(p)
-            and repo.dirstate.normalize(p) not in repo.dirstate):
-            return p
+        # A set of paths that are known to be absent. This prevents repeated
+        # checking of subdirectories that are known not to exist. It will be
+        # updated with any new dirs that are checked and found to be absent.
+        self._missingdircache = set()
 
-    # Check if the file conflicts with a directory containing unknown files.
-    if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
-        # Does the directory contain any files that are not in the dirstate?
-        for p, dirs, files in repo.wvfs.walk(f):
-            for fn in files:
-                relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
-                if relf not in repo.dirstate:
-                    return f
-    return None
+    def __call__(self, repo, f):
+        # Check for path prefixes that exist as unknown files.
+        for p in reversed(list(util.finddirs(f))):
+            if p in self._missingdircache:
+                return
+            if p in self._unknowndircache:
+                continue
+            if repo.wvfs.audit.check(p):
+                if (repo.wvfs.isfileorlink(p)
+                    and repo.dirstate.normalize(p) not in repo.dirstate):
+                    return p
+                if not repo.wvfs.lexists(p):
+                    self._missingdircache.add(p)
+                    return
+                self._unknowndircache.add(p)
+
+        # Check if the file conflicts with a directory containing unknown files.
+        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
+            # Does the directory contain any files that are not in the dirstate?
+            for p, dirs, files in repo.wvfs.walk(f):
+                for fn in files:
+                    relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
+                    if relf not in repo.dirstate:
+                        return f
+        return None
 
684 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
703 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
685 """
704 """
686 Considers any actions that care about the presence of conflicting unknown
705 Considers any actions that care about the presence of conflicting unknown
687 files. For some actions, the result is to abort; for others, it is to
706 files. For some actions, the result is to abort; for others, it is to
688 choose a different action.
707 choose a different action.
689 """
708 """
690 fileconflicts = set()
709 fileconflicts = set()
691 pathconflicts = set()
710 pathconflicts = set()
692 warnconflicts = set()
711 warnconflicts = set()
693 abortconflicts = set()
712 abortconflicts = set()
694 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
713 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
695 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
714 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
696 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
715 pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
697 if not force:
716 if not force:
698 def collectconflicts(conflicts, config):
717 def collectconflicts(conflicts, config):
699 if config == 'abort':
718 if config == 'abort':
700 abortconflicts.update(conflicts)
719 abortconflicts.update(conflicts)
701 elif config == 'warn':
720 elif config == 'warn':
702 warnconflicts.update(conflicts)
721 warnconflicts.update(conflicts)
703
722
+        checkunknowndirs = _unknowndirschecker()
         for f, (m, args, msg) in actions.iteritems():
             if m in ('c', 'dc'):
                 if _checkunknownfile(repo, wctx, mctx, f):
                     fileconflicts.add(f)
                 elif pathconfig and f not in wctx:
-                    path = _checkunknowndirs(repo, f)
+                    path = checkunknowndirs(repo, f)
                     if path is not None:
                         pathconflicts.add(path)
712 elif m == 'dg':
732 elif m == 'dg':
713 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
733 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
714 fileconflicts.add(f)
734 fileconflicts.add(f)
715
735
716 allconflicts = fileconflicts | pathconflicts
736 allconflicts = fileconflicts | pathconflicts
717 ignoredconflicts = set([c for c in allconflicts
737 ignoredconflicts = set([c for c in allconflicts
718 if repo.dirstate._ignore(c)])
738 if repo.dirstate._ignore(c)])
719 unknownconflicts = allconflicts - ignoredconflicts
739 unknownconflicts = allconflicts - ignoredconflicts
720 collectconflicts(ignoredconflicts, ignoredconfig)
740 collectconflicts(ignoredconflicts, ignoredconfig)
721 collectconflicts(unknownconflicts, unknownconfig)
741 collectconflicts(unknownconflicts, unknownconfig)
722 else:
742 else:
723 for f, (m, args, msg) in actions.iteritems():
743 for f, (m, args, msg) in actions.iteritems():
724 if m == 'cm':
744 if m == 'cm':
725 fl2, anc = args
745 fl2, anc = args
726 different = _checkunknownfile(repo, wctx, mctx, f)
746 different = _checkunknownfile(repo, wctx, mctx, f)
727 if repo.dirstate._ignore(f):
747 if repo.dirstate._ignore(f):
728 config = ignoredconfig
748 config = ignoredconfig
729 else:
749 else:
730 config = unknownconfig
750 config = unknownconfig
731
751
732 # The behavior when force is True is described by this table:
752 # The behavior when force is True is described by this table:
733 # config different mergeforce | action backup
753 # config different mergeforce | action backup
734 # * n * | get n
754 # * n * | get n
735 # * y y | merge -
755 # * y y | merge -
736 # abort y n | merge - (1)
756 # abort y n | merge - (1)
737 # warn y n | warn + get y
757 # warn y n | warn + get y
738 # ignore y n | get y
758 # ignore y n | get y
739 #
759 #
740 # (1) this is probably the wrong behavior here -- we should
760 # (1) this is probably the wrong behavior here -- we should
741 # probably abort, but some actions like rebases currently
761 # probably abort, but some actions like rebases currently
742 # don't like an abort happening in the middle of
762 # don't like an abort happening in the middle of
743 # merge.update.
763 # merge.update.
744 if not different:
764 if not different:
745 actions[f] = ('g', (fl2, False), "remote created")
765 actions[f] = ('g', (fl2, False), "remote created")
746 elif mergeforce or config == 'abort':
766 elif mergeforce or config == 'abort':
747 actions[f] = ('m', (f, f, None, False, anc),
767 actions[f] = ('m', (f, f, None, False, anc),
748 "remote differs from untracked local")
768 "remote differs from untracked local")
749 elif config == 'abort':
769 elif config == 'abort':
750 abortconflicts.add(f)
770 abortconflicts.add(f)
751 else:
771 else:
752 if config == 'warn':
772 if config == 'warn':
753 warnconflicts.add(f)
773 warnconflicts.add(f)
754 actions[f] = ('g', (fl2, True), "remote created")
774 actions[f] = ('g', (fl2, True), "remote created")
755
775
756 for f in sorted(abortconflicts):
776 for f in sorted(abortconflicts):
757 warn = repo.ui.warn
777 warn = repo.ui.warn
758 if f in pathconflicts:
778 if f in pathconflicts:
759 if repo.wvfs.isfileorlink(f):
779 if repo.wvfs.isfileorlink(f):
760 warn(_("%s: untracked file conflicts with directory\n") % f)
780 warn(_("%s: untracked file conflicts with directory\n") % f)
761 else:
781 else:
762 warn(_("%s: untracked directory conflicts with file\n") % f)
782 warn(_("%s: untracked directory conflicts with file\n") % f)
763 else:
783 else:
764 warn(_("%s: untracked file differs\n") % f)
784 warn(_("%s: untracked file differs\n") % f)
765 if abortconflicts:
785 if abortconflicts:
766 raise error.Abort(_("untracked files in working directory "
786 raise error.Abort(_("untracked files in working directory "
767 "differ from files in requested revision"))
787 "differ from files in requested revision"))
768
788
769 for f in sorted(warnconflicts):
789 for f in sorted(warnconflicts):
770 if repo.wvfs.isfileorlink(f):
790 if repo.wvfs.isfileorlink(f):
771 repo.ui.warn(_("%s: replacing untracked file\n") % f)
791 repo.ui.warn(_("%s: replacing untracked file\n") % f)
772 else:
792 else:
773 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
793 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
774
794
775 for f, (m, args, msg) in actions.iteritems():
795 for f, (m, args, msg) in actions.iteritems():
776 if m == 'c':
796 if m == 'c':
777 backup = (f in fileconflicts or f in pathconflicts or
797 backup = (f in fileconflicts or f in pathconflicts or
778 any(p in pathconflicts for p in util.finddirs(f)))
798 any(p in pathconflicts for p in util.finddirs(f)))
779 flags, = args
799 flags, = args
780 actions[f] = ('g', (flags, backup), msg)
800 actions[f] = ('g', (flags, backup), msg)
781
801
782 def _forgetremoved(wctx, mctx, branchmerge):
802 def _forgetremoved(wctx, mctx, branchmerge):
783 """
803 """
784 Forget removed files
804 Forget removed files
785
805
786 If we're jumping between revisions (as opposed to merging), and if
806 If we're jumping between revisions (as opposed to merging), and if
787 neither the working directory nor the target rev has the file,
807 neither the working directory nor the target rev has the file,
788 then we need to remove it from the dirstate, to prevent the
808 then we need to remove it from the dirstate, to prevent the
789 dirstate from listing the file when it is no longer in the
809 dirstate from listing the file when it is no longer in the
790 manifest.
810 manifest.
791
811
792 If we're merging, and the other revision has removed a file
812 If we're merging, and the other revision has removed a file
793 that is not present in the working directory, we need to mark it
813 that is not present in the working directory, we need to mark it
794 as removed.
814 as removed.
795 """
815 """
796
816
797 actions = {}
817 actions = {}
798 m = 'f'
818 m = 'f'
799 if branchmerge:
819 if branchmerge:
800 m = 'r'
820 m = 'r'
801 for f in wctx.deleted():
821 for f in wctx.deleted():
802 if f not in mctx:
822 if f not in mctx:
803 actions[f] = m, None, "forget deleted"
823 actions[f] = m, None, "forget deleted"
804
824
805 if not branchmerge:
825 if not branchmerge:
806 for f in wctx.removed():
826 for f in wctx.removed():
807 if f not in mctx:
827 if f not in mctx:
808 actions[f] = 'f', None, "forget removed"
828 actions[f] = 'f', None, "forget removed"
809
829
810 return actions
830 return actions
811
831
812 def _checkcollision(repo, wmf, actions):
832 def _checkcollision(repo, wmf, actions):
813 # build provisional merged manifest up
833 # build provisional merged manifest up
814 pmmf = set(wmf)
834 pmmf = set(wmf)
815
835
816 if actions:
836 if actions:
817 # k, dr, e and rd are no-op
837 # k, dr, e and rd are no-op
818 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
838 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
819 for f, args, msg in actions[m]:
839 for f, args, msg in actions[m]:
820 pmmf.add(f)
840 pmmf.add(f)
821 for f, args, msg in actions['r']:
841 for f, args, msg in actions['r']:
822 pmmf.discard(f)
842 pmmf.discard(f)
823 for f, args, msg in actions['dm']:
843 for f, args, msg in actions['dm']:
824 f2, flags = args
844 f2, flags = args
825 pmmf.discard(f2)
845 pmmf.discard(f2)
826 pmmf.add(f)
846 pmmf.add(f)
827 for f, args, msg in actions['dg']:
847 for f, args, msg in actions['dg']:
828 pmmf.add(f)
848 pmmf.add(f)
829 for f, args, msg in actions['m']:
849 for f, args, msg in actions['m']:
830 f1, f2, fa, move, anc = args
850 f1, f2, fa, move, anc = args
831 if move:
851 if move:
832 pmmf.discard(f1)
852 pmmf.discard(f1)
833 pmmf.add(f)
853 pmmf.add(f)
834
854
835 # check case-folding collision in provisional merged manifest
855 # check case-folding collision in provisional merged manifest
836 foldmap = {}
856 foldmap = {}
837 for f in pmmf:
857 for f in pmmf:
838 fold = util.normcase(f)
858 fold = util.normcase(f)
839 if fold in foldmap:
859 if fold in foldmap:
840 raise error.Abort(_("case-folding collision between %s and %s")
860 raise error.Abort(_("case-folding collision between %s and %s")
841 % (f, foldmap[fold]))
861 % (f, foldmap[fold]))
842 foldmap[fold] = f
862 foldmap[fold] = f
843
863
844 # check case-folding of directories
864 # check case-folding of directories
845 foldprefix = unfoldprefix = lastfull = ''
865 foldprefix = unfoldprefix = lastfull = ''
846 for fold, f in sorted(foldmap.items()):
866 for fold, f in sorted(foldmap.items()):
847 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
867 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
848 # the folded prefix matches but actual casing is different
868 # the folded prefix matches but actual casing is different
849 raise error.Abort(_("case-folding collision between "
869 raise error.Abort(_("case-folding collision between "
850 "%s and directory of %s") % (lastfull, f))
870 "%s and directory of %s") % (lastfull, f))
851 foldprefix = fold + '/'
871 foldprefix = fold + '/'
852 unfoldprefix = f + '/'
872 unfoldprefix = f + '/'
853 lastfull = f
873 lastfull = f
854
874
855 def driverpreprocess(repo, ms, wctx, labels=None):
875 def driverpreprocess(repo, ms, wctx, labels=None):
856 """run the preprocess step of the merge driver, if any
876 """run the preprocess step of the merge driver, if any
857
877
858 This is currently not implemented -- it's an extension point."""
878 This is currently not implemented -- it's an extension point."""
859 return True
879 return True
860
880
861 def driverconclude(repo, ms, wctx, labels=None):
881 def driverconclude(repo, ms, wctx, labels=None):
862 """run the conclude step of the merge driver, if any
882 """run the conclude step of the merge driver, if any
863
883
864 This is currently not implemented -- it's an extension point."""
884 This is currently not implemented -- it's an extension point."""
865 return True
885 return True
866
886
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in util.finddirs(f):
            if p in dirs:
                yield f, p
                break

def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in ('c', 'dc', 'm', 'cm'):
            # This action may create a new local file.
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
            for p in util.finddirs(f):
                if p in mf:
                    if p in mctx:
                        # The file is in a directory which aliases both a local
                        # and a remote file. This is an internal inconsistency
                        # within the remote manifest.
                        invalidconflicts.add(p)
                    else:
                        # The file is in a directory which aliases a local file.
                        # We will need to rename the local file.
                        localconflicts.add(p)
                if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
                    # The file is in a directory which aliases a remote file.
                    # This is an internal inconsistency within the remote
                    # manifest.
                    invalidconflicts.add(p)

        # Track the names of all deleted files.
        if m == 'r':
            deletedfiles.add(f)
        if m == 'm':
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == 'dm':
            f2, flags = args
            deletedfiles.add(f2)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = str(wctx).rstrip('+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = ('pr', (p,), "local path conflict")
            actions[p] = ('p', (pnew, 'l'), "path conflict")

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = str(mctx).rstrip('+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in ('dc', 'm'):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = ('dg', (p, fl), "remote path conflict")
                actions[p] = ('p', (pnew, 'r'), "path conflict")
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
        raise error.Abort(_("destination manifest contains path conflicts"))

def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    return actions, diverge, renamedelete

def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""

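    # 'cd' (changed/deleted) turns into a plain remove when the local content
    # still matches the ancestor, and 'dc' (deleted/changed) is dropped when
    # the remote content matches the ancestor, keeping the local deletion.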
    for f, (m, args, msg) in actions.items():
        if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete

def _getcwd():
    try:
        return pycompat.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise

def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()

def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                absf = repo.wjoin(f)
                if not repo.wvfs.lexists(f):
                    for p in util.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            absf = repo.wjoin(p)
                            break
                orig = scmutil.origpath(ui, repo, absf)
                if repo.wvfs.lexists(absf):
                    util.rename(absf, orig)
            wctx[f].clearunknown()
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()

def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

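    # Actions are applied in a fixed order: path conflicts are recorded ('p'),
    # removes run in parallel ('r'), conflicting local files are renamed
    # ('pr'), gets run in parallel ('g'), then the bookkeeping-only actions
    # ('f', 'a', 'am', 'k'), directory-rename moves and gets ('dm', 'dg'),
    # flag changes ('e'), and finally the file merges ('m', 'cd', 'dc').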
    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
    z = 0

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions['p']:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions['pr']:
        repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
        f0, = args
        if wctx[f0].lexists():
            repo.ui.note(_("moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                 overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

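# Illustrative note (not part of the original module): recordupdates() above
# consumes an ``actions`` mapping from action codes ('r', 'g', 'm', ...) to
# lists of ``(filename, args, message)`` tuples, the dictionary-of-lists
# format that update() below builds. A minimal sketch of that shape, with
# hypothetical file names and messages:
#
#     actions = {
#         'g': [('newfile.txt', ('', False), 'remote created')],
#         'r': [('oldfile.txt', None, 'other deleted')],
#     }
#     recordupdates(repo, actions, branchmerge=True)
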
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c -C -n -m dirty rev linear | result
     y  y  *  *    *   *    *    |   (1)
     y  *  y  *    *   *    *    |   (1)
     y  *  *  y    *   *    *    |   (1)
     *  y  y  *    *   *    *    |   (1)
     *  y  *  y    *   *    *    |   (1)
     *  *  y  y    *   *    *    |   (1)
     *  *  *  *    *   n    n    |    x
     *  *  *  *    n   *    *    |   ok
     n  n  n  n    y   *    y    | merge
     n  n  n  n    y   y    n    |   (2)
     n  n  n  y    y   *    *    | merge
     n  n  y  n    y   *    *    | merge if no conflict
     n  y  n  n    y   *    *    | discard
     y  n  n  n    y   *    *    |   (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
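    # Illustrative sketch (not part of the original source): given the
    # parameters documented above, a hypothetical caller performing a plain,
    # non-merging update to ``node`` with the 'noconflict' check might do:
    #
    #     stats = update(repo, node, branchmerge=False, force=False,
    #                    updatecheck='noconflict')
    #
    # ``stats`` is the same four-element tuple that applyupdates() returns
    # (the no-op path below returns ``0, 0, 0, 0``); ``stats[3]`` is what this
    # function later passes as the 'update' hook's error flag.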
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # number of calls to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

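        # Illustrative note (not part of the original code): the 'noconflict'
        # mode checked above corresponds to the experimental.updatecheck
        # option named in the docstring; a user could opt in with an hgrc
        # snippet along these lines (section/key inferred from that name):
        #
        #     [experimental]
        #     updatecheck = noconflict
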
        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

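        # Illustrative note (not part of the original code): ``diverge`` and
        # ``renamedelete`` are both mappings from a source filename to the
        # list of names it ended up renamed to, e.g. (hypothetical names):
        #
        #     diverge = {'a.txt': ['b.txt', 'c.txt']}
        #     renamedelete = {'old.txt': ['new-1.txt', 'new-2.txt']}
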
        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint('fsmonitor',
                                               'warn_update_file_count')
        try:
            extensions.find('fsmonitor')
            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (fsmonitorwarning
                and not fsmonitorenabled
                and p1.node() == nullid
                and len(actions['g']) >= fsmonitorthreshold
                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
            repo.ui.warn(
                _('(warning: large working directory being used without '
                  'fsmonitor enabled; enable fsmonitor to improve performance; '
                  'see "hg help -e fsmonitor")\n'))

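        # Illustrative note (not part of the original code): both settings read
        # above live under the [fsmonitor] config section, so a user who finds
        # the advertisement noisy could, for example, set:
        #
        #     [fsmonitor]
        #     warn_when_unused = false
        #     warn_update_file_count = 100000
        #
        # (the numeric value is only an illustrative threshold).
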
        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any

    """
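    # Illustrative sketch (not part of the original source): a hypothetical
    # caller grafting ``ctx`` onto the current working directory parent, with
    # the merge base and labels suggested above, might invoke this as:
    #
    #     stats = graft(repo, ctx, ctx.p1(), labels=['local', 'graft'])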
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats