merge: don't use workers in in-memory mode...
Phil Cohen
r34787:9c899660 default
@@ -1,2001 +1,2005 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes "record" entries produced by the
        de-serialization of the on-disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by versions prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is no contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extra data in v2. If
        there is a contradiction we ignore v2 content as we assume an old
        version of Mercurial has overwritten the mergestate file and left an
        old v2 file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do not need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records
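
    # Worked example of the wrapping described above (illustrative only, not
    # from the original file): a record such as ('m', data) is not in the
    # 'LOF' whitelist used by _writerecordsv2 below, so it is written as
    #   't' + struct.pack('>I', 1 + len(data)) + 'm' + data
    # and the "if rtype == 't'" branch in the loop above recovers the real
    # type from record[0] and the payload from record[1:].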

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == 'd':
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(('D', '\0'.join([filename] + v)))
            elif v[0] in ('pu', 'pr'):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(('P', '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(('C', '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append(('F', '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True
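
    # Descriptive note (added for clarity, derived from this file): the list
    # stored in self._state[fd] above is unpacked positionally by _resolve()
    # as
    #   state, hash, lfile, afile, anode, ofile, onode, flags
    # i.e. index 0 is the record state ('u' here), 1 the hash naming the local
    # backup under .hg/merge, 2 the local path, 3-4 the ancestor path and
    # filenode, 5-6 the other path and filenode, and 7 the local flags.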

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config
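
# For reference (illustrative, not part of the original file): the callers
# below read these settings as ('merge', 'checkunknown') and
# ('merge', 'checkignored'), so a user would configure them in hgrc roughly as
#   [merge]
#   checkunknown = warn
#   checkignored = abort
# where any of the values 'abort', 'ignore' or 'warn' listed above is accepted.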
646
646
647 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
647 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
648 if f2 is None:
648 if f2 is None:
649 f2 = f
649 f2 = f
650 return (repo.wvfs.audit.check(f)
650 return (repo.wvfs.audit.check(f)
651 and repo.wvfs.isfileorlink(f)
651 and repo.wvfs.isfileorlink(f)
652 and repo.dirstate.normalize(f) not in repo.dirstate
652 and repo.dirstate.normalize(f) not in repo.dirstate
653 and mctx[f2].cmp(wctx[f]))
653 and mctx[f2].cmp(wctx[f]))
654
654
655 def _checkunknowndirs(repo, f):
655 def _checkunknowndirs(repo, f):
656 """
656 """
657 Look for any unknown files or directories that may have a path conflict
657 Look for any unknown files or directories that may have a path conflict
658 with a file. If any path prefix of the file exists as a file or link,
658 with a file. If any path prefix of the file exists as a file or link,
659 then it conflicts. If the file itself is a directory that contains any
659 then it conflicts. If the file itself is a directory that contains any
660 file that is not tracked, then it conflicts.
660 file that is not tracked, then it conflicts.
661
661
662 Returns the shortest path at which a conflict occurs, or None if there is
662 Returns the shortest path at which a conflict occurs, or None if there is
663 no conflict.
663 no conflict.
664 """
664 """
665
665
666 # Check for path prefixes that exist as unknown files.
666 # Check for path prefixes that exist as unknown files.
667 for p in reversed(list(util.finddirs(f))):
667 for p in reversed(list(util.finddirs(f))):
668 if (repo.wvfs.audit.check(p)
668 if (repo.wvfs.audit.check(p)
669 and repo.wvfs.isfileorlink(p)
669 and repo.wvfs.isfileorlink(p)
670 and repo.dirstate.normalize(p) not in repo.dirstate):
670 and repo.dirstate.normalize(p) not in repo.dirstate):
671 return p
671 return p
672
672
673 # Check if the file conflicts with a directory containing unknown files.
673 # Check if the file conflicts with a directory containing unknown files.
674 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
674 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
675 # Does the directory contain any files that are not in the dirstate?
675 # Does the directory contain any files that are not in the dirstate?
676 for p, dirs, files in repo.wvfs.walk(f):
676 for p, dirs, files in repo.wvfs.walk(f):
677 for fn in files:
677 for fn in files:
678 relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
678 relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
679 if relf not in repo.dirstate:
679 if relf not in repo.dirstate:
680 return f
680 return f
681 return None
681 return None
682
682
683 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
683 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
684 """
684 """
685 Considers any actions that care about the presence of conflicting unknown
685 Considers any actions that care about the presence of conflicting unknown
686 files. For some actions, the result is to abort; for others, it is to
686 files. For some actions, the result is to abort; for others, it is to
687 choose a different action.
687 choose a different action.
688 """
688 """
689 fileconflicts = set()
689 fileconflicts = set()
690 pathconflicts = set()
690 pathconflicts = set()
691 warnconflicts = set()
691 warnconflicts = set()
692 abortconflicts = set()
692 abortconflicts = set()
693 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
693 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
694 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
694 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
695 if not force:
695 if not force:
696 def collectconflicts(conflicts, config):
696 def collectconflicts(conflicts, config):
697 if config == 'abort':
697 if config == 'abort':
698 abortconflicts.update(conflicts)
698 abortconflicts.update(conflicts)
699 elif config == 'warn':
699 elif config == 'warn':
700 warnconflicts.update(conflicts)
700 warnconflicts.update(conflicts)
701
701
702 for f, (m, args, msg) in actions.iteritems():
702 for f, (m, args, msg) in actions.iteritems():
703 if m in ('c', 'dc'):
703 if m in ('c', 'dc'):
704 if _checkunknownfile(repo, wctx, mctx, f):
704 if _checkunknownfile(repo, wctx, mctx, f):
705 fileconflicts.add(f)
705 fileconflicts.add(f)
706 elif f not in wctx:
706 elif f not in wctx:
707 path = _checkunknowndirs(repo, f)
707 path = _checkunknowndirs(repo, f)
708 if path is not None:
708 if path is not None:
709 pathconflicts.add(path)
709 pathconflicts.add(path)
710 elif m == 'dg':
710 elif m == 'dg':
711 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
711 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
712 fileconflicts.add(f)
712 fileconflicts.add(f)
713
713
714 allconflicts = fileconflicts | pathconflicts
714 allconflicts = fileconflicts | pathconflicts
715 ignoredconflicts = set([c for c in allconflicts
715 ignoredconflicts = set([c for c in allconflicts
716 if repo.dirstate._ignore(c)])
716 if repo.dirstate._ignore(c)])
717 unknownconflicts = allconflicts - ignoredconflicts
717 unknownconflicts = allconflicts - ignoredconflicts
718 collectconflicts(ignoredconflicts, ignoredconfig)
718 collectconflicts(ignoredconflicts, ignoredconfig)
719 collectconflicts(unknownconflicts, unknownconfig)
719 collectconflicts(unknownconflicts, unknownconfig)
720 else:
720 else:
721 for f, (m, args, msg) in actions.iteritems():
721 for f, (m, args, msg) in actions.iteritems():
722 if m == 'cm':
722 if m == 'cm':
723 fl2, anc = args
723 fl2, anc = args
724 different = _checkunknownfile(repo, wctx, mctx, f)
724 different = _checkunknownfile(repo, wctx, mctx, f)
725 if repo.dirstate._ignore(f):
725 if repo.dirstate._ignore(f):
726 config = ignoredconfig
726 config = ignoredconfig
727 else:
727 else:
728 config = unknownconfig
728 config = unknownconfig
729
729
730 # The behavior when force is True is described by this table:
730 # The behavior when force is True is described by this table:
731 # config different mergeforce | action backup
731 # config different mergeforce | action backup
732 # * n * | get n
732 # * n * | get n
733 # * y y | merge -
733 # * y y | merge -
734 # abort y n | merge - (1)
734 # abort y n | merge - (1)
735 # warn y n | warn + get y
735 # warn y n | warn + get y
736 # ignore y n | get y
736 # ignore y n | get y
737 #
737 #
738 # (1) this is probably the wrong behavior here -- we should
738 # (1) this is probably the wrong behavior here -- we should
739 # probably abort, but some actions like rebases currently
739 # probably abort, but some actions like rebases currently
740 # don't like an abort happening in the middle of
740 # don't like an abort happening in the middle of
741 # merge.update.
741 # merge.update.
742 if not different:
742 if not different:
743 actions[f] = ('g', (fl2, False), "remote created")
743 actions[f] = ('g', (fl2, False), "remote created")
744 elif mergeforce or config == 'abort':
744 elif mergeforce or config == 'abort':
745 actions[f] = ('m', (f, f, None, False, anc),
745 actions[f] = ('m', (f, f, None, False, anc),
746 "remote differs from untracked local")
746 "remote differs from untracked local")
747 elif config == 'abort':
747 elif config == 'abort':
748 abortconflicts.add(f)
748 abortconflicts.add(f)
749 else:
749 else:
750 if config == 'warn':
750 if config == 'warn':
751 warnconflicts.add(f)
751 warnconflicts.add(f)
752 actions[f] = ('g', (fl2, True), "remote created")
752 actions[f] = ('g', (fl2, True), "remote created")
753
753
754 for f in sorted(abortconflicts):
754 for f in sorted(abortconflicts):
755 warn = repo.ui.warn
755 warn = repo.ui.warn
756 if f in pathconflicts:
756 if f in pathconflicts:
757 if repo.wvfs.isfileorlink(f):
757 if repo.wvfs.isfileorlink(f):
758 warn(_("%s: untracked file conflicts with directory\n") % f)
758 warn(_("%s: untracked file conflicts with directory\n") % f)
759 else:
759 else:
760 warn(_("%s: untracked directory conflicts with file\n") % f)
760 warn(_("%s: untracked directory conflicts with file\n") % f)
761 else:
761 else:
762 warn(_("%s: untracked file differs\n") % f)
762 warn(_("%s: untracked file differs\n") % f)
763 if abortconflicts:
763 if abortconflicts:
764 raise error.Abort(_("untracked files in working directory "
764 raise error.Abort(_("untracked files in working directory "
765 "differ from files in requested revision"))
765 "differ from files in requested revision"))
766
766
767 for f in sorted(warnconflicts):
767 for f in sorted(warnconflicts):
768 if repo.wvfs.isfileorlink(f):
768 if repo.wvfs.isfileorlink(f):
769 repo.ui.warn(_("%s: replacing untracked file\n") % f)
769 repo.ui.warn(_("%s: replacing untracked file\n") % f)
770 else:
770 else:
771 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
771 repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
772
772
773 for f, (m, args, msg) in actions.iteritems():
773 for f, (m, args, msg) in actions.iteritems():
774 if m == 'c':
774 if m == 'c':
775 backup = (f in fileconflicts or f in pathconflicts or
775 backup = (f in fileconflicts or f in pathconflicts or
776 any(p in pathconflicts for p in util.finddirs(f)))
776 any(p in pathconflicts for p in util.finddirs(f)))
777 flags, = args
777 flags, = args
778 actions[f] = ('g', (flags, backup), msg)
778 actions[f] = ('g', (flags, backup), msg)
779
779
780 def _forgetremoved(wctx, mctx, branchmerge):
780 def _forgetremoved(wctx, mctx, branchmerge):
781 """
781 """
782 Forget removed files
782 Forget removed files
783
783
784 If we're jumping between revisions (as opposed to merging), and if
784 If we're jumping between revisions (as opposed to merging), and if
785 neither the working directory nor the target rev has the file,
785 neither the working directory nor the target rev has the file,
786 then we need to remove it from the dirstate, to prevent the
786 then we need to remove it from the dirstate, to prevent the
787 dirstate from listing the file when it is no longer in the
787 dirstate from listing the file when it is no longer in the
788 manifest.
788 manifest.
789
789
790 If we're merging, and the other revision has removed a file
790 If we're merging, and the other revision has removed a file
791 that is not present in the working directory, we need to mark it
791 that is not present in the working directory, we need to mark it
792 as removed.
792 as removed.
793 """
793 """
794
794
795 actions = {}
795 actions = {}
796 m = 'f'
796 m = 'f'
797 if branchmerge:
797 if branchmerge:
798 m = 'r'
798 m = 'r'
799 for f in wctx.deleted():
799 for f in wctx.deleted():
800 if f not in mctx:
800 if f not in mctx:
801 actions[f] = m, None, "forget deleted"
801 actions[f] = m, None, "forget deleted"
802
802
803 if not branchmerge:
803 if not branchmerge:
804 for f in wctx.removed():
804 for f in wctx.removed():
805 if f not in mctx:
805 if f not in mctx:
806 actions[f] = 'f', None, "forget removed"
806 actions[f] = 'f', None, "forget removed"
807
807
808 return actions
808 return actions
809
809
810 def _checkcollision(repo, wmf, actions):
810 def _checkcollision(repo, wmf, actions):
811 # build provisional merged manifest up
811 # build provisional merged manifest up
812 pmmf = set(wmf)
812 pmmf = set(wmf)
813
813
814 if actions:
814 if actions:
815 # k, dr, e and rd are no-op
815 # k, dr, e and rd are no-op
816 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
816 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
817 for f, args, msg in actions[m]:
817 for f, args, msg in actions[m]:
818 pmmf.add(f)
818 pmmf.add(f)
819 for f, args, msg in actions['r']:
819 for f, args, msg in actions['r']:
820 pmmf.discard(f)
820 pmmf.discard(f)
821 for f, args, msg in actions['dm']:
821 for f, args, msg in actions['dm']:
822 f2, flags = args
822 f2, flags = args
823 pmmf.discard(f2)
823 pmmf.discard(f2)
824 pmmf.add(f)
824 pmmf.add(f)
825 for f, args, msg in actions['dg']:
825 for f, args, msg in actions['dg']:
826 pmmf.add(f)
826 pmmf.add(f)
827 for f, args, msg in actions['m']:
827 for f, args, msg in actions['m']:
828 f1, f2, fa, move, anc = args
828 f1, f2, fa, move, anc = args
829 if move:
829 if move:
830 pmmf.discard(f1)
830 pmmf.discard(f1)
831 pmmf.add(f)
831 pmmf.add(f)
832
832
833 # check case-folding collision in provisional merged manifest
833 # check case-folding collision in provisional merged manifest
834 foldmap = {}
834 foldmap = {}
835 for f in pmmf:
835 for f in pmmf:
836 fold = util.normcase(f)
836 fold = util.normcase(f)
837 if fold in foldmap:
837 if fold in foldmap:
838 raise error.Abort(_("case-folding collision between %s and %s")
838 raise error.Abort(_("case-folding collision between %s and %s")
839 % (f, foldmap[fold]))
839 % (f, foldmap[fold]))
840 foldmap[fold] = f
840 foldmap[fold] = f
841
841
842 # check case-folding of directories
842 # check case-folding of directories
843 foldprefix = unfoldprefix = lastfull = ''
843 foldprefix = unfoldprefix = lastfull = ''
844 for fold, f in sorted(foldmap.items()):
844 for fold, f in sorted(foldmap.items()):
845 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
845 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
846 # the folded prefix matches but actual casing is different
846 # the folded prefix matches but actual casing is different
847 raise error.Abort(_("case-folding collision between "
847 raise error.Abort(_("case-folding collision between "
848 "%s and directory of %s") % (lastfull, f))
848 "%s and directory of %s") % (lastfull, f))
849 foldprefix = fold + '/'
849 foldprefix = fold + '/'
850 unfoldprefix = f + '/'
850 unfoldprefix = f + '/'
851 lastfull = f
851 lastfull = f
852
852
853 def driverpreprocess(repo, ms, wctx, labels=None):
853 def driverpreprocess(repo, ms, wctx, labels=None):
854 """run the preprocess step of the merge driver, if any
854 """run the preprocess step of the merge driver, if any
855
855
856 This is currently not implemented -- it's an extension point."""
856 This is currently not implemented -- it's an extension point."""
857 return True
857 return True
858
858
859 def driverconclude(repo, ms, wctx, labels=None):
859 def driverconclude(repo, ms, wctx, labels=None):
860 """run the conclude step of the merge driver, if any
860 """run the conclude step of the merge driver, if any
861
861
862 This is currently not implemented -- it's an extension point."""
862 This is currently not implemented -- it's an extension point."""
863 return True
863 return True
864
864
865 def _filesindirs(repo, manifest, dirs):
865 def _filesindirs(repo, manifest, dirs):
866 """
866 """
867 Generator that yields pairs of all the files in the manifest that are found
867 Generator that yields pairs of all the files in the manifest that are found
868 inside the directories listed in dirs, and which directory they are found
868 inside the directories listed in dirs, and which directory they are found
869 in.
869 in.
870 """
870 """
871 for f in manifest:
871 for f in manifest:
872 for p in util.finddirs(f):
872 for p in util.finddirs(f):
873 if p in dirs:
873 if p in dirs:
874 yield f, p
874 yield f, p
875 break
875 break
876
876
877 def checkpathconflicts(repo, wctx, mctx, actions):
877 def checkpathconflicts(repo, wctx, mctx, actions):
878 """
878 """
879 Check if any actions introduce path conflicts in the repository, updating
879 Check if any actions introduce path conflicts in the repository, updating
880 actions to record or handle the path conflict accordingly.
880 actions to record or handle the path conflict accordingly.
881 """
881 """
882 mf = wctx.manifest()
882 mf = wctx.manifest()
883
883
884 # The set of local files that conflict with a remote directory.
884 # The set of local files that conflict with a remote directory.
885 localconflicts = set()
885 localconflicts = set()
886
886
887 # The set of directories that conflict with a remote file, and so may cause
887 # The set of directories that conflict with a remote file, and so may cause
888 # conflicts if they still contain any files after the merge.
888 # conflicts if they still contain any files after the merge.
889 remoteconflicts = set()
889 remoteconflicts = set()
890
890
891 # The set of directories that appear as both a file and a directory in the
891 # The set of directories that appear as both a file and a directory in the
892 # remote manifest. These indicate an invalid remote manifest, which
892 # remote manifest. These indicate an invalid remote manifest, which
893 # can't be updated to cleanly.
893 # can't be updated to cleanly.
894 invalidconflicts = set()
894 invalidconflicts = set()
895
895
896 # The set of files deleted by all the actions.
896 # The set of files deleted by all the actions.
897 deletedfiles = set()
897 deletedfiles = set()
898
898
899 for f, (m, args, msg) in actions.items():
899 for f, (m, args, msg) in actions.items():
900 if m in ('c', 'dc', 'm', 'cm'):
900 if m in ('c', 'dc', 'm', 'cm'):
901 # This action may create a new local file.
901 # This action may create a new local file.
902 if mf.hasdir(f):
902 if mf.hasdir(f):
903 # The file aliases a local directory. This might be ok if all
903 # The file aliases a local directory. This might be ok if all
904 # the files in the local directory are being deleted. This
904 # the files in the local directory are being deleted. This
905 # will be checked once we know what all the deleted files are.
905 # will be checked once we know what all the deleted files are.
906 remoteconflicts.add(f)
906 remoteconflicts.add(f)
907 for p in util.finddirs(f):
907 for p in util.finddirs(f):
908 if p in mf:
908 if p in mf:
909 if p in mctx:
909 if p in mctx:
910 # The file is in a directory which aliases both a local
910 # The file is in a directory which aliases both a local
911 # and a remote file. This is an internal inconsistency
911 # and a remote file. This is an internal inconsistency
912 # within the remote manifest.
912 # within the remote manifest.
913 invalidconflicts.add(p)
913 invalidconflicts.add(p)
914 else:
914 else:
915 # The file is in a directory which aliases a local file.
915 # The file is in a directory which aliases a local file.
916 # We will need to rename the local file.
916 # We will need to rename the local file.
917 localconflicts.add(p)
917 localconflicts.add(p)
918 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
918 if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
919 # The file is in a directory which aliases a remote file.
919 # The file is in a directory which aliases a remote file.
920 # This is an internal inconsistency within the remote
920 # This is an internal inconsistency within the remote
921 # manifest.
921 # manifest.
922 invalidconflicts.add(p)
922 invalidconflicts.add(p)
923
923
924 # Track the names of all deleted files.
924 # Track the names of all deleted files.
925 if m == 'r':
925 if m == 'r':
926 deletedfiles.add(f)
926 deletedfiles.add(f)
927 if m == 'm':
927 if m == 'm':
928 f1, f2, fa, move, anc = args
928 f1, f2, fa, move, anc = args
929 if move:
929 if move:
930 deletedfiles.add(f1)
930 deletedfiles.add(f1)
931 if m == 'dm':
931 if m == 'dm':
932 f2, flags = args
932 f2, flags = args
933 deletedfiles.add(f2)
933 deletedfiles.add(f2)
934
934
935 # Rename all local conflicting files that have not been deleted.
935 # Rename all local conflicting files that have not been deleted.
936 for p in localconflicts:
936 for p in localconflicts:
937 if p not in deletedfiles:
937 if p not in deletedfiles:
938 ctxname = str(wctx).rstrip('+')
938 ctxname = str(wctx).rstrip('+')
939 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
939 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
940 actions[pnew] = ('pr', (p,), "local path conflict")
940 actions[pnew] = ('pr', (p,), "local path conflict")
941 actions[p] = ('p', (pnew, 'l'), "path conflict")
941 actions[p] = ('p', (pnew, 'l'), "path conflict")
942
942
943 if remoteconflicts:
943 if remoteconflicts:
944 # Check if all files in the conflicting directories have been removed.
944 # Check if all files in the conflicting directories have been removed.
945 ctxname = str(mctx).rstrip('+')
945 ctxname = str(mctx).rstrip('+')
946 for f, p in _filesindirs(repo, mf, remoteconflicts):
946 for f, p in _filesindirs(repo, mf, remoteconflicts):
947 if f not in deletedfiles:
947 if f not in deletedfiles:
948 m, args, msg = actions[p]
948 m, args, msg = actions[p]
949 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
949 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
950 if m in ('dc', 'm'):
950 if m in ('dc', 'm'):
951 # Action was merge, just update target.
951 # Action was merge, just update target.
952 actions[pnew] = (m, args, msg)
952 actions[pnew] = (m, args, msg)
953 else:
953 else:
954 # Action was create, change to renamed get action.
954 # Action was create, change to renamed get action.
955 fl = args[0]
955 fl = args[0]
956 actions[pnew] = ('dg', (p, fl), "remote path conflict")
956 actions[pnew] = ('dg', (p, fl), "remote path conflict")
957 actions[p] = ('p', (pnew, 'r'), "path conflict")
957 actions[p] = ('p', (pnew, 'r'), "path conflict")
958 remoteconflicts.remove(p)
958 remoteconflicts.remove(p)
959 break
959 break
960
960
961 if invalidconflicts:
961 if invalidconflicts:
962 for p in invalidconflicts:
962 for p in invalidconflicts:
963 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
963 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
964 raise error.Abort(_("destination manifest contains path conflicts"))
964 raise error.Abort(_("destination manifest contains path conflicts"))
965
965
966 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
966 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
967 acceptremote, followcopies, forcefulldiff=False):
967 acceptremote, followcopies, forcefulldiff=False):
968 """
968 """
969 Merge wctx and p2 with ancestor pa and generate merge action list
969 Merge wctx and p2 with ancestor pa and generate merge action list
970
970
971 branchmerge and force are as passed in to update
971 branchmerge and force are as passed in to update
972 matcher = matcher to filter file lists
972 matcher = matcher to filter file lists
973 acceptremote = accept the incoming changes without prompting
973 acceptremote = accept the incoming changes without prompting
974 """
974 """
975 if matcher is not None and matcher.always():
975 if matcher is not None and matcher.always():
976 matcher = None
976 matcher = None
977
977
978 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
978 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
979
979
980 # manifests fetched in order are going to be faster, so prime the caches
980 # manifests fetched in order are going to be faster, so prime the caches
981 [x.manifest() for x in
981 [x.manifest() for x in
982 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
982 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
983
983
984 if followcopies:
984 if followcopies:
985 ret = copies.mergecopies(repo, wctx, p2, pa)
985 ret = copies.mergecopies(repo, wctx, p2, pa)
986 copy, movewithdir, diverge, renamedelete, dirmove = ret
986 copy, movewithdir, diverge, renamedelete, dirmove = ret
987
987
988 boolbm = pycompat.bytestr(bool(branchmerge))
988 boolbm = pycompat.bytestr(bool(branchmerge))
989 boolf = pycompat.bytestr(bool(force))
989 boolf = pycompat.bytestr(bool(force))
990 boolm = pycompat.bytestr(bool(matcher))
990 boolm = pycompat.bytestr(bool(matcher))
991 repo.ui.note(_("resolving manifests\n"))
991 repo.ui.note(_("resolving manifests\n"))
992 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
992 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
993 % (boolbm, boolf, boolm))
993 % (boolbm, boolf, boolm))
994 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
994 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
995
995
996 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
996 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
997 copied = set(copy.values())
997 copied = set(copy.values())
998 copied.update(movewithdir.values())
998 copied.update(movewithdir.values())
999
999
1000 if '.hgsubstate' in m1:
1000 if '.hgsubstate' in m1:
1001 # check whether sub state is modified
1001 # check whether sub state is modified
1002 if any(wctx.sub(s).dirty() for s in wctx.substate):
1002 if any(wctx.sub(s).dirty() for s in wctx.substate):
1003 m1['.hgsubstate'] = modifiednodeid
1003 m1['.hgsubstate'] = modifiednodeid
1004
1004
1005 # Don't use m2-vs-ma optimization if:
1005 # Don't use m2-vs-ma optimization if:
1006 # - ma is the same as m1 or m2, which we're just going to diff again later
1006 # - ma is the same as m1 or m2, which we're just going to diff again later
1007 # - The caller specifically asks for a full diff, which is useful during bid
1007 # - The caller specifically asks for a full diff, which is useful during bid
1008 # merge.
1008 # merge.
1009 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1009 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1010 # Identify which files are relevant to the merge, so we can limit the
1010 # Identify which files are relevant to the merge, so we can limit the
1011 # total m1-vs-m2 diff to just those files. This has significant
1011 # total m1-vs-m2 diff to just those files. This has significant
1012 # performance benefits in large repositories.
1012 # performance benefits in large repositories.
1013 relevantfiles = set(ma.diff(m2).keys())
1013 relevantfiles = set(ma.diff(m2).keys())
1014
1014
1015 # For copied and moved files, we need to add the source file too.
1015 # For copied and moved files, we need to add the source file too.
1016 for copykey, copyvalue in copy.iteritems():
1016 for copykey, copyvalue in copy.iteritems():
1017 if copyvalue in relevantfiles:
1017 if copyvalue in relevantfiles:
1018 relevantfiles.add(copykey)
1018 relevantfiles.add(copykey)
1019 for movedirkey in movewithdir:
1019 for movedirkey in movewithdir:
1020 relevantfiles.add(movedirkey)
1020 relevantfiles.add(movedirkey)
1021 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1021 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1022 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1022 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1023
1023
1024 diff = m1.diff(m2, match=matcher)
1024 diff = m1.diff(m2, match=matcher)
1025
1025
1026 if matcher is None:
1026 if matcher is None:
1027 matcher = matchmod.always('', '')
1027 matcher = matchmod.always('', '')
1028
1028
1029 actions = {}
1029 actions = {}
1030 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1030 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1031 if n1 and n2: # file exists on both local and remote side
1031 if n1 and n2: # file exists on both local and remote side
1032 if f not in ma:
1032 if f not in ma:
1033 fa = copy.get(f, None)
1033 fa = copy.get(f, None)
1034 if fa is not None:
1034 if fa is not None:
1035 actions[f] = ('m', (f, f, fa, False, pa.node()),
1035 actions[f] = ('m', (f, f, fa, False, pa.node()),
1036 "both renamed from " + fa)
1036 "both renamed from " + fa)
1037 else:
1037 else:
1038 actions[f] = ('m', (f, f, None, False, pa.node()),
1038 actions[f] = ('m', (f, f, None, False, pa.node()),
1039 "both created")
1039 "both created")
1040 else:
1040 else:
1041 a = ma[f]
1041 a = ma[f]
1042 fla = ma.flags(f)
1042 fla = ma.flags(f)
1043 nol = 'l' not in fl1 + fl2 + fla
1043 nol = 'l' not in fl1 + fl2 + fla
1044 if n2 == a and fl2 == fla:
1044 if n2 == a and fl2 == fla:
1045 actions[f] = ('k', (), "remote unchanged")
1045 actions[f] = ('k', (), "remote unchanged")
1046 elif n1 == a and fl1 == fla: # local unchanged - use remote
1046 elif n1 == a and fl1 == fla: # local unchanged - use remote
1047 if n1 == n2: # optimization: keep local content
1047 if n1 == n2: # optimization: keep local content
1048 actions[f] = ('e', (fl2,), "update permissions")
1048 actions[f] = ('e', (fl2,), "update permissions")
1049 else:
1049 else:
1050 actions[f] = ('g', (fl2, False), "remote is newer")
1050 actions[f] = ('g', (fl2, False), "remote is newer")
1051 elif nol and n2 == a: # remote only changed 'x'
1051 elif nol and n2 == a: # remote only changed 'x'
1052 actions[f] = ('e', (fl2,), "update permissions")
1052 actions[f] = ('e', (fl2,), "update permissions")
1053 elif nol and n1 == a: # local only changed 'x'
1053 elif nol and n1 == a: # local only changed 'x'
1054 actions[f] = ('g', (fl1, False), "remote is newer")
1054 actions[f] = ('g', (fl1, False), "remote is newer")
1055 else: # both changed something
1055 else: # both changed something
1056 actions[f] = ('m', (f, f, f, False, pa.node()),
1056 actions[f] = ('m', (f, f, f, False, pa.node()),
1057 "versions differ")
1057 "versions differ")
1058 elif n1: # file exists only on local side
1058 elif n1: # file exists only on local side
1059 if f in copied:
1059 if f in copied:
1060 pass # we'll deal with it on m2 side
1060 pass # we'll deal with it on m2 side
1061 elif f in movewithdir: # directory rename, move local
1061 elif f in movewithdir: # directory rename, move local
1062 f2 = movewithdir[f]
1062 f2 = movewithdir[f]
1063 if f2 in m2:
1063 if f2 in m2:
1064 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1064 actions[f2] = ('m', (f, f2, None, True, pa.node()),
1065 "remote directory rename, both created")
1065 "remote directory rename, both created")
1066 else:
1066 else:
1067 actions[f2] = ('dm', (f, fl1),
1067 actions[f2] = ('dm', (f, fl1),
1068 "remote directory rename - move from " + f)
1068 "remote directory rename - move from " + f)
1069 elif f in copy:
1069 elif f in copy:
1070 f2 = copy[f]
1070 f2 = copy[f]
1071 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1071 actions[f] = ('m', (f, f2, f2, False, pa.node()),
1072 "local copied/moved from " + f2)
1072 "local copied/moved from " + f2)
1073 elif f in ma: # clean, a different, no remote
1073 elif f in ma: # clean, a different, no remote
1074 if n1 != ma[f]:
1074 if n1 != ma[f]:
1075 if acceptremote:
1075 if acceptremote:
1076 actions[f] = ('r', None, "remote delete")
1076 actions[f] = ('r', None, "remote delete")
1077 else:
1077 else:
1078 actions[f] = ('cd', (f, None, f, False, pa.node()),
1078 actions[f] = ('cd', (f, None, f, False, pa.node()),
1079 "prompt changed/deleted")
1079 "prompt changed/deleted")
1080 elif n1 == addednodeid:
1080 elif n1 == addednodeid:
1081 # This extra 'a' is added by working copy manifest to mark
1081 # This extra 'a' is added by working copy manifest to mark
1082 # the file as locally added. We should forget it instead of
1082 # the file as locally added. We should forget it instead of
1083 # deleting it.
1083 # deleting it.
1084 actions[f] = ('f', None, "remote deleted")
1084 actions[f] = ('f', None, "remote deleted")
1085 else:
1085 else:
1086 actions[f] = ('r', None, "other deleted")
1086 actions[f] = ('r', None, "other deleted")
1087 elif n2: # file exists only on remote side
1087 elif n2: # file exists only on remote side
1088 if f in copied:
1088 if f in copied:
1089 pass # we'll deal with it on m1 side
1089 pass # we'll deal with it on m1 side
1090 elif f in movewithdir:
1090 elif f in movewithdir:
1091 f2 = movewithdir[f]
1091 f2 = movewithdir[f]
1092 if f2 in m1:
1092 if f2 in m1:
1093 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1093 actions[f2] = ('m', (f2, f, None, False, pa.node()),
1094 "local directory rename, both created")
1094 "local directory rename, both created")
1095 else:
1095 else:
1096 actions[f2] = ('dg', (f, fl2),
1096 actions[f2] = ('dg', (f, fl2),
1097 "local directory rename - get from " + f)
1097 "local directory rename - get from " + f)
1098 elif f in copy:
1098 elif f in copy:
1099 f2 = copy[f]
1099 f2 = copy[f]
1100 if f2 in m2:
1100 if f2 in m2:
1101 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1101 actions[f] = ('m', (f2, f, f2, False, pa.node()),
1102 "remote copied from " + f2)
1102 "remote copied from " + f2)
1103 else:
1103 else:
1104 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1104 actions[f] = ('m', (f2, f, f2, True, pa.node()),
1105 "remote moved from " + f2)
1105 "remote moved from " + f2)
1106 elif f not in ma:
1106 elif f not in ma:
1107 # local unknown, remote created: the logic is described by the
1107 # local unknown, remote created: the logic is described by the
1108 # following table:
1108 # following table:
1109 #
1109 #
1110 # force branchmerge different | action
1110 # force branchmerge different | action
1111 # n * * | create
1111 # n * * | create
1112 # y n * | create
1112 # y n * | create
1113 # y y n | create
1113 # y y n | create
1114 # y y y | merge
1114 # y y y | merge
1115 #
1115 #
1116 # Checking whether the files are different is expensive, so we
1116 # Checking whether the files are different is expensive, so we
1117 # don't do that when we can avoid it.
1117 # don't do that when we can avoid it.
1118 if not force:
1118 if not force:
1119 actions[f] = ('c', (fl2,), "remote created")
1119 actions[f] = ('c', (fl2,), "remote created")
1120 elif not branchmerge:
1120 elif not branchmerge:
1121 actions[f] = ('c', (fl2,), "remote created")
1121 actions[f] = ('c', (fl2,), "remote created")
1122 else:
1122 else:
1123 actions[f] = ('cm', (fl2, pa.node()),
1123 actions[f] = ('cm', (fl2, pa.node()),
1124 "remote created, get or merge")
1124 "remote created, get or merge")
1125 elif n2 != ma[f]:
1125 elif n2 != ma[f]:
1126 df = None
1126 df = None
1127 for d in dirmove:
1127 for d in dirmove:
1128 if f.startswith(d):
1128 if f.startswith(d):
1129 # new file added in a directory that was moved
1129 # new file added in a directory that was moved
1130 df = dirmove[d] + f[len(d):]
1130 df = dirmove[d] + f[len(d):]
1131 break
1131 break
1132 if df is not None and df in m1:
1132 if df is not None and df in m1:
1133 actions[df] = ('m', (df, f, f, False, pa.node()),
1133 actions[df] = ('m', (df, f, f, False, pa.node()),
1134 "local directory rename - respect move from " + f)
1134 "local directory rename - respect move from " + f)
1135 elif acceptremote:
1135 elif acceptremote:
1136 actions[f] = ('c', (fl2,), "remote recreating")
1136 actions[f] = ('c', (fl2,), "remote recreating")
1137 else:
1137 else:
1138 actions[f] = ('dc', (None, f, f, False, pa.node()),
1138 actions[f] = ('dc', (None, f, f, False, pa.node()),
1139 "prompt deleted/changed")
1139 "prompt deleted/changed")
1140
1140
1141 # If we are merging, look for path conflicts.
1141 # If we are merging, look for path conflicts.
1142 checkpathconflicts(repo, wctx, p2, actions)
1142 checkpathconflicts(repo, wctx, p2, actions)
1143
1143
1144 return actions, diverge, renamedelete
1144 return actions, diverge, renamedelete
1145
1145
1146 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1146 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1147 """Resolves false conflicts where the nodeid changed but the content
1147 """Resolves false conflicts where the nodeid changed but the content
1148 remained the same."""
1148 remained the same."""
1149
1149
1150 for f, (m, args, msg) in actions.items():
1150 for f, (m, args, msg) in actions.items():
1151 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1151 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
1152 # local did change but ended up with same content
1152 # local did change but ended up with same content
1153 actions[f] = 'r', None, "prompt same"
1153 actions[f] = 'r', None, "prompt same"
1154 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1154 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
1155 # remote did change but ended up with same content
1155 # remote did change but ended up with same content
1156 del actions[f] # don't get = keep local deleted
1156 del actions[f] # don't get = keep local deleted
1157
1157
1158 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1158 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1159 acceptremote, followcopies, matcher=None,
1159 acceptremote, followcopies, matcher=None,
1160 mergeforce=False):
1160 mergeforce=False):
1161 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1161 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1162 # Avoid cycle.
1162 # Avoid cycle.
1163 from . import sparse
1163 from . import sparse
1164
1164
1165 if len(ancestors) == 1: # default
1165 if len(ancestors) == 1: # default
1166 actions, diverge, renamedelete = manifestmerge(
1166 actions, diverge, renamedelete = manifestmerge(
1167 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1167 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1168 acceptremote, followcopies)
1168 acceptremote, followcopies)
1169 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1169 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1170
1170
1171 else: # only when merge.preferancestor=* - the default
1171 else: # only when merge.preferancestor=* - the default
1172 repo.ui.note(
1172 repo.ui.note(
1173 _("note: merging %s and %s using bids from ancestors %s\n") %
1173 _("note: merging %s and %s using bids from ancestors %s\n") %
1174 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1174 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1175 for anc in ancestors)))
1175 for anc in ancestors)))
1176
1176
1177 # Call for bids
1177 # Call for bids
1178 fbids = {} # mapping filename to bids (action method to list of actions)
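# Illustrative shape of fbids after the loop below, e.g.:
# fbids['foo'] = {'g': [('g', (fl2, False), "remote is newer")],
#                 'k': [('k', (), "remote unchanged")]}
# i.e. each ancestor contributes one complete action tuple per file.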
1179 diverge, renamedelete = None, None
1179 diverge, renamedelete = None, None
1180 for ancestor in ancestors:
1180 for ancestor in ancestors:
1181 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1181 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1182 actions, diverge1, renamedelete1 = manifestmerge(
1182 actions, diverge1, renamedelete1 = manifestmerge(
1183 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1183 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1184 acceptremote, followcopies, forcefulldiff=True)
1184 acceptremote, followcopies, forcefulldiff=True)
1185 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1185 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1186
1186
1187 # Track the shortest set of warnings on the theory that bid
1188 # merge will correctly incorporate more information
1189 if diverge is None or len(diverge1) < len(diverge):
1189 if diverge is None or len(diverge1) < len(diverge):
1190 diverge = diverge1
1190 diverge = diverge1
1191 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1191 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1192 renamedelete = renamedelete1
1192 renamedelete = renamedelete1
1193
1193
1194 for f, a in sorted(actions.iteritems()):
1194 for f, a in sorted(actions.iteritems()):
1195 m, args, msg = a
1195 m, args, msg = a
1196 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1196 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1197 if f in fbids:
1197 if f in fbids:
1198 d = fbids[f]
1198 d = fbids[f]
1199 if m in d:
1199 if m in d:
1200 d[m].append(a)
1200 d[m].append(a)
1201 else:
1201 else:
1202 d[m] = [a]
1202 d[m] = [a]
1203 else:
1203 else:
1204 fbids[f] = {m: [a]}
1204 fbids[f] = {m: [a]}
1205
1205
1206 # Pick the best bid for each file
1206 # Pick the best bid for each file
1207 repo.ui.note(_('\nauction for merging merge bids\n'))
1207 repo.ui.note(_('\nauction for merging merge bids\n'))
1208 actions = {}
1208 actions = {}
1209 dms = [] # filenames that have dm actions
1209 dms = [] # filenames that have dm actions
1210 for f, bids in sorted(fbids.items()):
1210 for f, bids in sorted(fbids.items()):
1211 # bids is a mapping from action method to list of actions
1212 # Consensus?
1212 # Consensus?
1213 if len(bids) == 1: # all bids are the same kind of method
1213 if len(bids) == 1: # all bids are the same kind of method
1214 m, l = list(bids.items())[0]
1214 m, l = list(bids.items())[0]
1215 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1215 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1216 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1216 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1217 actions[f] = l[0]
1217 actions[f] = l[0]
1218 if m == 'dm':
1218 if m == 'dm':
1219 dms.append(f)
1219 dms.append(f)
1220 continue
1220 continue
1221 # If keep is an option, just do it.
1221 # If keep is an option, just do it.
1222 if 'k' in bids:
1222 if 'k' in bids:
1223 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1223 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1224 actions[f] = bids['k'][0]
1224 actions[f] = bids['k'][0]
1225 continue
1225 continue
1226 # If there are gets and they all agree [how could they not?], do it.
1226 # If there are gets and they all agree [how could they not?], do it.
1227 if 'g' in bids:
1227 if 'g' in bids:
1228 ga0 = bids['g'][0]
1228 ga0 = bids['g'][0]
1229 if all(a == ga0 for a in bids['g'][1:]):
1229 if all(a == ga0 for a in bids['g'][1:]):
1230 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1230 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1231 actions[f] = ga0
1231 actions[f] = ga0
1232 continue
1232 continue
1233 # TODO: Consider other simple actions such as mode changes
1233 # TODO: Consider other simple actions such as mode changes
1234 # Handle inefficient democrazy.
1234 # Handle inefficient democrazy.
1235 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1235 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1236 for m, l in sorted(bids.items()):
1236 for m, l in sorted(bids.items()):
1237 for _f, args, msg in l:
1237 for _f, args, msg in l:
1238 repo.ui.note(' %s -> %s\n' % (msg, m))
1238 repo.ui.note(' %s -> %s\n' % (msg, m))
1239 # Pick random action. TODO: Instead, prompt user when resolving
1239 # Pick random action. TODO: Instead, prompt user when resolving
1240 m, l = list(bids.items())[0]
1240 m, l = list(bids.items())[0]
1241 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1241 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1242 (f, m))
1242 (f, m))
1243 actions[f] = l[0]
1243 actions[f] = l[0]
1244 if m == 'dm':
1244 if m == 'dm':
1245 dms.append(f)
1245 dms.append(f)
1246 continue
1246 continue
1247 # Work around 'dm' that can cause multiple actions for the same file
1247 # Work around 'dm' that can cause multiple actions for the same file
1248 for f in dms:
1248 for f in dms:
1249 dm, (f0, flags), msg = actions[f]
1249 dm, (f0, flags), msg = actions[f]
1250 assert dm == 'dm', dm
1250 assert dm == 'dm', dm
1251 if f0 in actions and actions[f0][0] == 'r':
1251 if f0 in actions and actions[f0][0] == 'r':
1252 # We have one bid for removing a file and another for moving it.
1252 # We have one bid for removing a file and another for moving it.
1253 # These two could be merged as first move and then delete ...
1253 # These two could be merged as first move and then delete ...
1254 # but instead drop moving and just delete.
1254 # but instead drop moving and just delete.
1255 del actions[f]
1255 del actions[f]
1256 repo.ui.note(_('end of auction\n\n'))
1256 repo.ui.note(_('end of auction\n\n'))
1257
1257
1258 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1258 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1259
1259
1260 if wctx.rev() is None:
1260 if wctx.rev() is None:
1261 fractions = _forgetremoved(wctx, mctx, branchmerge)
1261 fractions = _forgetremoved(wctx, mctx, branchmerge)
1262 actions.update(fractions)
1262 actions.update(fractions)
1263
1263
1264 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1264 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1265 actions)
1265 actions)
1266
1266
1267 return prunedactions, diverge, renamedelete
1267 return prunedactions, diverge, renamedelete
1268
1268
1269 def _getcwd():
1269 def _getcwd():
1270 try:
1270 try:
1271 return pycompat.getcwd()
1271 return pycompat.getcwd()
1272 except OSError as err:
1272 except OSError as err:
1273 if err.errno == errno.ENOENT:
1273 if err.errno == errno.ENOENT:
1274 return None
1274 return None
1275 raise
1275 raise
1276
1276
1277 def batchremove(repo, wctx, actions):
1277 def batchremove(repo, wctx, actions):
1278 """apply removes to the working directory
1278 """apply removes to the working directory
1279
1279
1280 yields tuples for progress updates
1280 yields tuples for progress updates
1281 """
1281 """
1282 verbose = repo.ui.verbose
1282 verbose = repo.ui.verbose
1283 cwd = _getcwd()
1283 cwd = _getcwd()
1284 i = 0
1284 i = 0
1285 for f, args, msg in actions:
1285 for f, args, msg in actions:
1286 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1286 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1287 if verbose:
1287 if verbose:
1288 repo.ui.note(_("removing %s\n") % f)
1288 repo.ui.note(_("removing %s\n") % f)
1289 wctx[f].audit()
1289 wctx[f].audit()
1290 try:
1290 try:
1291 wctx[f].remove(ignoremissing=True)
1291 wctx[f].remove(ignoremissing=True)
1292 except OSError as inst:
1292 except OSError as inst:
1293 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1293 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1294 (f, inst.strerror))
1294 (f, inst.strerror))
1295 if i == 100:
1295 if i == 100:
1296 yield i, f
1296 yield i, f
1297 i = 0
1297 i = 0
1298 i += 1
1298 i += 1
1299 if i > 0:
1299 if i > 0:
1300 yield i, f
1300 yield i, f
1301
1301
1302 if cwd and not _getcwd():
1302 if cwd and not _getcwd():
1303 # cwd was removed in the course of removing files; print a helpful
1303 # cwd was removed in the course of removing files; print a helpful
1304 # warning.
1304 # warning.
1305 repo.ui.warn(_("current directory was removed\n"
1305 repo.ui.warn(_("current directory was removed\n"
1306 "(consider changing to repo root: %s)\n") % repo.root)
1306 "(consider changing to repo root: %s)\n") % repo.root)
1307
1307
1308 # It's necessary to flush here in case we're inside a worker fork and will
1308 # It's necessary to flush here in case we're inside a worker fork and will
1309 # quit after this function.
1309 # quit after this function.
1310 wctx.flushall()
1310 wctx.flushall()
1311
1311
1312 def batchget(repo, mctx, wctx, actions):
1312 def batchget(repo, mctx, wctx, actions):
1313 """apply gets to the working directory
1313 """apply gets to the working directory
1314
1314
1315 mctx is the context to get from
1315 mctx is the context to get from
1316
1316
1317 yields tuples for progress updates
1317 yields tuples for progress updates
1318 """
1318 """
1319 verbose = repo.ui.verbose
1319 verbose = repo.ui.verbose
1320 fctx = mctx.filectx
1320 fctx = mctx.filectx
1321 ui = repo.ui
1321 ui = repo.ui
1322 i = 0
1322 i = 0
1323 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1323 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1324 for f, (flags, backup), msg in actions:
1324 for f, (flags, backup), msg in actions:
1325 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1325 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1326 if verbose:
1326 if verbose:
1327 repo.ui.note(_("getting %s\n") % f)
1327 repo.ui.note(_("getting %s\n") % f)
1328
1328
1329 if backup:
1329 if backup:
1330 # If a file or directory exists with the same name, back that
1330 # If a file or directory exists with the same name, back that
1331 # up. Otherwise, look to see if there is a file that conflicts
1331 # up. Otherwise, look to see if there is a file that conflicts
1332 # with a directory this file is in, and if so, back that up.
1332 # with a directory this file is in, and if so, back that up.
1333 absf = repo.wjoin(f)
1333 absf = repo.wjoin(f)
1334 if not repo.wvfs.lexists(f):
1334 if not repo.wvfs.lexists(f):
1335 for p in util.finddirs(f):
1335 for p in util.finddirs(f):
1336 if repo.wvfs.isfileorlink(p):
1336 if repo.wvfs.isfileorlink(p):
1337 absf = repo.wjoin(p)
1337 absf = repo.wjoin(p)
1338 break
1338 break
1339 orig = scmutil.origpath(ui, repo, absf)
1339 orig = scmutil.origpath(ui, repo, absf)
1340 if repo.wvfs.lexists(absf):
1340 if repo.wvfs.lexists(absf):
1341 util.rename(absf, orig)
1341 util.rename(absf, orig)
1342 wctx[f].clearunknown()
1342 wctx[f].clearunknown()
1343 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1343 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1344 if i == 100:
1344 if i == 100:
1345 yield i, f
1345 yield i, f
1346 i = 0
1346 i = 0
1347 i += 1
1347 i += 1
1348 if i > 0:
1348 if i > 0:
1349 yield i, f
1349 yield i, f
1350
1350
1351 # It's necessary to flush here in case we're inside a worker fork and will
1351 # It's necessary to flush here in case we're inside a worker fork and will
1352 # quit after this function.
1352 # quit after this function.
1353 wctx.flushall()
1353 wctx.flushall()
1354
1354
1355 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1355 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1356 """apply the merge action list to the working directory
1356 """apply the merge action list to the working directory
1357
1357
1358 wctx is the working copy context
1358 wctx is the working copy context
1359 mctx is the context to be merged into the working copy
1359 mctx is the context to be merged into the working copy
1360
1360
1361 Return a tuple of counts (updated, merged, removed, unresolved) that
1361 Return a tuple of counts (updated, merged, removed, unresolved) that
1362 describes how many files were affected by the update.
1362 describes how many files were affected by the update.
1363 """
1363 """
1364
1364
1365 updated, merged, removed = 0, 0, 0
1365 updated, merged, removed = 0, 0, 0
1366 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1366 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1367 moves = []
1367 moves = []
1368 for m, l in actions.items():
1368 for m, l in actions.items():
1369 l.sort()
1369 l.sort()
1370
1370
1371 # 'cd' and 'dc' actions are treated like other merge conflicts
1371 # 'cd' and 'dc' actions are treated like other merge conflicts
1372 mergeactions = sorted(actions['cd'])
1372 mergeactions = sorted(actions['cd'])
1373 mergeactions.extend(sorted(actions['dc']))
1373 mergeactions.extend(sorted(actions['dc']))
1374 mergeactions.extend(actions['m'])
1374 mergeactions.extend(actions['m'])
1375 for f, args, msg in mergeactions:
1375 for f, args, msg in mergeactions:
1376 f1, f2, fa, move, anc = args
1376 f1, f2, fa, move, anc = args
1377 if f == '.hgsubstate': # merged internally
1377 if f == '.hgsubstate': # merged internally
1378 continue
1378 continue
1379 if f1 is None:
1379 if f1 is None:
1380 fcl = filemerge.absentfilectx(wctx, fa)
1380 fcl = filemerge.absentfilectx(wctx, fa)
1381 else:
1381 else:
1382 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1382 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1383 fcl = wctx[f1]
1383 fcl = wctx[f1]
1384 if f2 is None:
1384 if f2 is None:
1385 fco = filemerge.absentfilectx(mctx, fa)
1385 fco = filemerge.absentfilectx(mctx, fa)
1386 else:
1386 else:
1387 fco = mctx[f2]
1387 fco = mctx[f2]
1388 actx = repo[anc]
1388 actx = repo[anc]
1389 if fa in actx:
1389 if fa in actx:
1390 fca = actx[fa]
1390 fca = actx[fa]
1391 else:
1391 else:
1392 # TODO: move to absentfilectx
1392 # TODO: move to absentfilectx
1393 fca = repo.filectx(f1, fileid=nullrev)
1393 fca = repo.filectx(f1, fileid=nullrev)
1394 ms.add(fcl, fco, fca, f)
1394 ms.add(fcl, fco, fca, f)
1395 if f1 != f and move:
1395 if f1 != f and move:
1396 moves.append(f1)
1396 moves.append(f1)
1397
1397
1398 _updating = _('updating')
1398 _updating = _('updating')
1399 _files = _('files')
1399 _files = _('files')
1400 progress = repo.ui.progress
1400 progress = repo.ui.progress
1401
1401
1402 # remove renamed files after safely stored
1402 # remove renamed files after safely stored
1403 for f in moves:
1403 for f in moves:
1404 if wctx[f].lexists():
1404 if wctx[f].lexists():
1405 repo.ui.debug("removing %s\n" % f)
1405 repo.ui.debug("removing %s\n" % f)
1406 wctx[f].audit()
1406 wctx[f].audit()
1407 wctx[f].remove()
1407 wctx[f].remove()
1408
1408
1409 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1409 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1410 z = 0
1410 z = 0
1411
1411
1412 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1412 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1413 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1413 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1414
1414
1415 # record path conflicts
1415 # record path conflicts
1416 for f, args, msg in actions['p']:
1416 for f, args, msg in actions['p']:
1417 f1, fo = args
1417 f1, fo = args
1418 s = repo.ui.status
1418 s = repo.ui.status
1419 s(_("%s: path conflict - a file or link has the same name as a "
1419 s(_("%s: path conflict - a file or link has the same name as a "
1420 "directory\n") % f)
1420 "directory\n") % f)
1421 if fo == 'l':
1421 if fo == 'l':
1422 s(_("the local file has been renamed to %s\n") % f1)
1422 s(_("the local file has been renamed to %s\n") % f1)
1423 else:
1423 else:
1424 s(_("the remote file has been renamed to %s\n") % f1)
1424 s(_("the remote file has been renamed to %s\n") % f1)
1425 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1425 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1426 ms.addpath(f, f1, fo)
1426 ms.addpath(f, f1, fo)
1427 z += 1
1427 z += 1
1428 progress(_updating, z, item=f, total=numupdates, unit=_files)
1428 progress(_updating, z, item=f, total=numupdates, unit=_files)
1429
1429
1430 # When merging in-memory, we can't support worker processes, so set the
1431 # per-item cost at 0 in that case.
1432 cost = 0 if wctx.isinmemory() else 0.001
1433
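# (worker.worker() only hands work off to child processes when its cost
# heuristic - cost per item times number of items - makes forking look
# worthwhile, so a zero cost keeps both the remove and get phases
# in-process; for an in-memory merge that is what we want, since writes a
# forked child makes to the in-memory wctx would never reach the parent.)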
1434 # remove in parallel (must come before resolving path conflicts and getting)
1435 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1436 actions['r'])
1433 for i, item in prog:
1437 for i, item in prog:
1434 z += i
1438 z += i
1435 progress(_updating, z, item=item, total=numupdates, unit=_files)
1439 progress(_updating, z, item=item, total=numupdates, unit=_files)
1436 removed = len(actions['r'])
1440 removed = len(actions['r'])
1437
1441
1438 # resolve path conflicts (must come before getting)
1442 # resolve path conflicts (must come before getting)
1439 for f, args, msg in actions['pr']:
1443 for f, args, msg in actions['pr']:
1440 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1444 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1441 f0, = args
1445 f0, = args
1442 if wctx[f0].lexists():
1446 if wctx[f0].lexists():
1443 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1447 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1444 wctx[f].audit()
1448 wctx[f].audit()
1445 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1449 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1446 wctx[f0].remove()
1450 wctx[f0].remove()
1447 z += 1
1451 z += 1
1448 progress(_updating, z, item=f, total=numupdates, unit=_files)
1452 progress(_updating, z, item=f, total=numupdates, unit=_files)
1449
1453
1450 # We should flush before forking into worker processes, since those workers
1454 # We should flush before forking into worker processes, since those workers
1451 # flush when they complete, and we don't want to duplicate work.
1455 # flush when they complete, and we don't want to duplicate work.
1452 wctx.flushall()
1456 wctx.flushall()
1453
1457
1458 # get in parallel
1459 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1460 actions['g'])
1457 for i, item in prog:
1461 for i, item in prog:
1458 z += i
1462 z += i
1459 progress(_updating, z, item=item, total=numupdates, unit=_files)
1463 progress(_updating, z, item=item, total=numupdates, unit=_files)
1460 updated = len(actions['g'])
1464 updated = len(actions['g'])
1461
1465
1462 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1466 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1463 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1467 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1464
1468
1465 # forget (manifest only, just log it) (must come first)
1469 # forget (manifest only, just log it) (must come first)
1466 for f, args, msg in actions['f']:
1470 for f, args, msg in actions['f']:
1467 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1471 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1468 z += 1
1472 z += 1
1469 progress(_updating, z, item=f, total=numupdates, unit=_files)
1473 progress(_updating, z, item=f, total=numupdates, unit=_files)
1470
1474
1471 # re-add (manifest only, just log it)
1475 # re-add (manifest only, just log it)
1472 for f, args, msg in actions['a']:
1476 for f, args, msg in actions['a']:
1473 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1477 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1474 z += 1
1478 z += 1
1475 progress(_updating, z, item=f, total=numupdates, unit=_files)
1479 progress(_updating, z, item=f, total=numupdates, unit=_files)
1476
1480
1477 # re-add/mark as modified (manifest only, just log it)
1481 # re-add/mark as modified (manifest only, just log it)
1478 for f, args, msg in actions['am']:
1482 for f, args, msg in actions['am']:
1479 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1483 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1480 z += 1
1484 z += 1
1481 progress(_updating, z, item=f, total=numupdates, unit=_files)
1485 progress(_updating, z, item=f, total=numupdates, unit=_files)
1482
1486
1483 # keep (noop, just log it)
1487 # keep (noop, just log it)
1484 for f, args, msg in actions['k']:
1488 for f, args, msg in actions['k']:
1485 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1489 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1486 # no progress
1490 # no progress
1487
1491
1488 # directory rename, move local
1492 # directory rename, move local
1489 for f, args, msg in actions['dm']:
1493 for f, args, msg in actions['dm']:
1490 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1494 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1491 z += 1
1495 z += 1
1492 progress(_updating, z, item=f, total=numupdates, unit=_files)
1496 progress(_updating, z, item=f, total=numupdates, unit=_files)
1493 f0, flags = args
1497 f0, flags = args
1494 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1498 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1495 wctx[f].audit()
1499 wctx[f].audit()
1496 wctx[f].write(wctx.filectx(f0).data(), flags)
1500 wctx[f].write(wctx.filectx(f0).data(), flags)
1497 wctx[f0].remove()
1501 wctx[f0].remove()
1498 updated += 1
1502 updated += 1
1499
1503
1500 # local directory rename, get
1504 # local directory rename, get
1501 for f, args, msg in actions['dg']:
1505 for f, args, msg in actions['dg']:
1502 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1506 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1503 z += 1
1507 z += 1
1504 progress(_updating, z, item=f, total=numupdates, unit=_files)
1508 progress(_updating, z, item=f, total=numupdates, unit=_files)
1505 f0, flags = args
1509 f0, flags = args
1506 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1510 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1507 wctx[f].write(mctx.filectx(f0).data(), flags)
1511 wctx[f].write(mctx.filectx(f0).data(), flags)
1508 updated += 1
1512 updated += 1
1509
1513
1510 # exec
1514 # exec
1511 for f, args, msg in actions['e']:
1515 for f, args, msg in actions['e']:
1512 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1516 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1513 z += 1
1517 z += 1
1514 progress(_updating, z, item=f, total=numupdates, unit=_files)
1518 progress(_updating, z, item=f, total=numupdates, unit=_files)
1515 flags, = args
1519 flags, = args
1516 wctx[f].audit()
1520 wctx[f].audit()
1517 wctx[f].setflags('l' in flags, 'x' in flags)
1521 wctx[f].setflags('l' in flags, 'x' in flags)
1518 updated += 1
1522 updated += 1
1519
1523
1520 # the ordering is important here -- ms.mergedriver will raise if the merge
1524 # the ordering is important here -- ms.mergedriver will raise if the merge
1521 # driver has changed, and we want to be able to bypass it when overwrite is
1525 # driver has changed, and we want to be able to bypass it when overwrite is
1522 # True
1526 # True
1523 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1527 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1524
1528
1525 if usemergedriver:
1529 if usemergedriver:
1526 ms.commit()
1530 ms.commit()
1527 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1531 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1528 # the driver might leave some files unresolved
1532 # the driver might leave some files unresolved
1529 unresolvedf = set(ms.unresolved())
1533 unresolvedf = set(ms.unresolved())
1530 if not proceed:
1534 if not proceed:
1531 # XXX setting unresolved to at least 1 is a hack to make sure we
1535 # XXX setting unresolved to at least 1 is a hack to make sure we
1532 # error out
1536 # error out
1533 return updated, merged, removed, max(len(unresolvedf), 1)
1537 return updated, merged, removed, max(len(unresolvedf), 1)
1534 newactions = []
1538 newactions = []
1535 for f, args, msg in mergeactions:
1539 for f, args, msg in mergeactions:
1536 if f in unresolvedf:
1540 if f in unresolvedf:
1537 newactions.append((f, args, msg))
1541 newactions.append((f, args, msg))
1538 mergeactions = newactions
1542 mergeactions = newactions
1539
1543
1540 try:
1544 try:
1541 # premerge
1545 # premerge
1542 tocomplete = []
1546 tocomplete = []
1543 for f, args, msg in mergeactions:
1547 for f, args, msg in mergeactions:
1544 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1548 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1545 z += 1
1549 z += 1
1546 progress(_updating, z, item=f, total=numupdates, unit=_files)
1550 progress(_updating, z, item=f, total=numupdates, unit=_files)
1547 if f == '.hgsubstate': # subrepo states need updating
1551 if f == '.hgsubstate': # subrepo states need updating
1548 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1552 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1549 overwrite, labels)
1553 overwrite, labels)
1550 continue
1554 continue
1551 wctx[f].audit()
1555 wctx[f].audit()
1552 complete, r = ms.preresolve(f, wctx)
1556 complete, r = ms.preresolve(f, wctx)
1553 if not complete:
1557 if not complete:
1554 numupdates += 1
1558 numupdates += 1
1555 tocomplete.append((f, args, msg))
1559 tocomplete.append((f, args, msg))
1556
1560
1557 # merge
1561 # merge
1558 for f, args, msg in tocomplete:
1562 for f, args, msg in tocomplete:
1559 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1563 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1560 z += 1
1564 z += 1
1561 progress(_updating, z, item=f, total=numupdates, unit=_files)
1565 progress(_updating, z, item=f, total=numupdates, unit=_files)
1562 ms.resolve(f, wctx)
1566 ms.resolve(f, wctx)
1563
1567
1564 finally:
1568 finally:
1565 ms.commit()
1569 ms.commit()
1566
1570
1567 unresolved = ms.unresolvedcount()
1571 unresolved = ms.unresolvedcount()
1568
1572
1569 if usemergedriver and not unresolved and ms.mdstate() != 's':
1573 if usemergedriver and not unresolved and ms.mdstate() != 's':
1570 if not driverconclude(repo, ms, wctx, labels=labels):
1574 if not driverconclude(repo, ms, wctx, labels=labels):
1571 # XXX setting unresolved to at least 1 is a hack to make sure we
1575 # XXX setting unresolved to at least 1 is a hack to make sure we
1572 # error out
1576 # error out
1573 unresolved = max(unresolved, 1)
1577 unresolved = max(unresolved, 1)
1574
1578
1575 ms.commit()
1579 ms.commit()
1576
1580
1577 msupdated, msmerged, msremoved = ms.counts()
1581 msupdated, msmerged, msremoved = ms.counts()
1578 updated += msupdated
1582 updated += msupdated
1579 merged += msmerged
1583 merged += msmerged
1580 removed += msremoved
1584 removed += msremoved
1581
1585
1582 extraactions = ms.actions()
1586 extraactions = ms.actions()
1583 if extraactions:
1587 if extraactions:
1584 mfiles = set(a[0] for a in actions['m'])
1588 mfiles = set(a[0] for a in actions['m'])
1585 for k, acts in extraactions.iteritems():
1589 for k, acts in extraactions.iteritems():
1586 actions[k].extend(acts)
1590 actions[k].extend(acts)
1587 # Remove these files from actions['m'] as well. This is important
1591 # Remove these files from actions['m'] as well. This is important
1588 # because in recordupdates, files in actions['m'] are processed
1592 # because in recordupdates, files in actions['m'] are processed
1589 # after files in other actions, and the merge driver might add
1593 # after files in other actions, and the merge driver might add
1590 # files to those actions via extraactions above. This can lead to a
1594 # files to those actions via extraactions above. This can lead to a
1591 # file being recorded twice, with poor results. This is especially
1595 # file being recorded twice, with poor results. This is especially
1592 # problematic for actions['r'] (currently only possible with the
1596 # problematic for actions['r'] (currently only possible with the
1593 # merge driver in the initial merge process; interrupted merges
1597 # merge driver in the initial merge process; interrupted merges
1594 # don't go through this flow).
1598 # don't go through this flow).
1595 #
1599 #
1596 # The real fix here is to have indexes by both file and action so
1600 # The real fix here is to have indexes by both file and action so
1597 # that when the action for a file is changed it is automatically
1601 # that when the action for a file is changed it is automatically
1598 # reflected in the other action lists. But that involves a more
1602 # reflected in the other action lists. But that involves a more
1599 # complex data structure, so this will do for now.
1603 # complex data structure, so this will do for now.
1600 #
1604 #
1601 # We don't need to do the same operation for 'dc' and 'cd' because
1605 # We don't need to do the same operation for 'dc' and 'cd' because
1602 # those lists aren't consulted again.
1606 # those lists aren't consulted again.
1603 mfiles.difference_update(a[0] for a in acts)
1607 mfiles.difference_update(a[0] for a in acts)
1604
1608
1605 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1609 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1606
1610
1607 progress(_updating, None, total=numupdates, unit=_files)
1611 progress(_updating, None, total=numupdates, unit=_files)
1608
1612
1609 return updated, merged, removed, unresolved
1613 return updated, merged, removed, unresolved
1610
1614
1611 def recordupdates(repo, actions, branchmerge):
1615 def recordupdates(repo, actions, branchmerge):
1612 "record merge actions to the dirstate"
1616 "record merge actions to the dirstate"
1613 # remove (must come first)
1617 # remove (must come first)
1614 for f, args, msg in actions.get('r', []):
1618 for f, args, msg in actions.get('r', []):
1615 if branchmerge:
1619 if branchmerge:
1616 repo.dirstate.remove(f)
1620 repo.dirstate.remove(f)
1617 else:
1621 else:
1618 repo.dirstate.drop(f)
1622 repo.dirstate.drop(f)
1619
1623
1620 # forget (must come first)
1624 # forget (must come first)
1621 for f, args, msg in actions.get('f', []):
1625 for f, args, msg in actions.get('f', []):
1622 repo.dirstate.drop(f)
1626 repo.dirstate.drop(f)
1623
1627
1624 # resolve path conflicts
1628 # resolve path conflicts
1625 for f, args, msg in actions.get('pr', []):
1629 for f, args, msg in actions.get('pr', []):
1626 f0, = args
1630 f0, = args
1627 origf0 = repo.dirstate.copied(f0) or f0
1631 origf0 = repo.dirstate.copied(f0) or f0
1628 repo.dirstate.add(f)
1632 repo.dirstate.add(f)
1629 repo.dirstate.copy(origf0, f)
1633 repo.dirstate.copy(origf0, f)
1630 if f0 == origf0:
1634 if f0 == origf0:
1631 repo.dirstate.remove(f0)
1635 repo.dirstate.remove(f0)
1632 else:
1636 else:
1633 repo.dirstate.drop(f0)
1637 repo.dirstate.drop(f0)
1634
1638
1635 # re-add
1639 # re-add
1636 for f, args, msg in actions.get('a', []):
1640 for f, args, msg in actions.get('a', []):
1637 repo.dirstate.add(f)
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

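(Editor's illustration, not part of merge.py.) Every loop above walks the same 'actions' mapping produced by calculateupdates(): an action code such as 'a', 'am', 'g', 'e', 'k', 'm', 'dm' or 'dg' maps to a list of (filename, args, msg) tuples, and the shape of args depends on the code. A minimal sketch of that structure, with invented file names and values:

    # Sketch only: the rough shape of the dictionary-of-lists consumed by the
    # dirstate-recording loops above. File names and args are invented.
    actions = {
        'am': [('kept.txt', None, "prompt keep")],                # add, modified
        'g':  [('incoming.txt', ('', False), "remote created")],  # get from p2
        'm':  [('renamed.txt',                                    # 3-way merge
                ('old.txt', 'renamed.txt', 'old.txt', True, None),
                "versions differ")],
    }
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args  # same unpacking as in the 'm' loop above
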
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated):
      if this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/-C/-n/-m options (or none of them), whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n option
    doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |  (1)
     y   *   y   *    *     *     *     |  (1)
     y   *   *   y    *     *     *     |  (1)
     *   y   y   *    *     *     *     |  (1)
     *   y   *   y    *     *     *     |  (1)
     *   *   y   y    *     *     *     |  (1)
     *   *   *   *    *     n     n     |  x
     *   *   *   *    n     *     *     |  ok
     n   n   n   n    y     *     y     |  merge
     n   n   n   n    y     y     n     |  (2)
     n   n   n   y    y     *     *     |  merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |  (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
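    # Editor's note, not part of the original source: the "-n" column above has
    # no command-line flag of its own; it corresponds to running update with
    # the experimental.updatecheck=noconflict knob named in the docstring, e.g.
    #
    #     hg --config experimental.updatecheck=noconflict update <rev>
    #
    # which lets a dirty working directory move between revisions as long as
    # no file would need a merge (the "merge if no conflict" row).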
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # The branching here is a bit convoluted so that we call
                    # obsutil.foreground only when we actually need it.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary
        # includes (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

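(Editor's sketch, not code from merge.py.) An extension wanting the equivalent of a plain working-directory update could call update() directly with branchmerge=False and force=False so that the checks above apply; the helper name and the 'noconflict' choice below are illustrative assumptions built only on the signature documented above:

    from mercurial import merge as mergemod

    def jumpto(repo, rev):
        # Sketch only: move the working directory to rev, refusing to run if
        # dirty files would need a merge (see the updatecheck handling above).
        node = repo[rev].node()
        stats = mergemod.update(repo, node, branchmerge=False, force=False,
                                updatecheck='noconflict')
        # stats is the same tuple applyupdates() returns; stats[3] counts
        # unresolved files, as used by the 'update' hook above.
        return stats[3] == 0
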
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
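
(Editor's sketch, not code from merge.py.) A graft-style caller would typically pass the changeset being grafted together with its first parent as the merge base, then commit the result itself; the helper below is an illustrative assumption built only on the graft() signature above:

    def graftone(repo, ctx, labels=None):
        # Sketch only: replay ctx onto the current working directory parent and
        # report whether the merge applied without leaving unresolved files.
        if labels is None:
            labels = ['local', 'graft']
        stats = graft(repo, ctx, ctx.p1(), labels)
        return stats[3] == 0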