##// END OF EJS Templates
merge: add merge action 'p' to record path conflicts during update...
Mark Thomas -
r34548:81aebcc7 default
parent child Browse files
Show More
@@ -1,1796 +1,1812 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from . import (
25 from . import (
26 copies,
26 copies,
27 error,
27 error,
28 filemerge,
28 filemerge,
29 match as matchmod,
29 match as matchmod,
30 obsutil,
30 obsutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepo,
33 subrepo,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41 def _droponode(data):
41 def _droponode(data):
42 # used for compatibility for v1
42 # used for compatibility for v1
43 bits = data.split('\0')
43 bits = data.split('\0')
44 bits = bits[:-2] + bits[-1:]
44 bits = bits[:-2] + bits[-1:]
45 return '\0'.join(bits)
45 return '\0'.join(bits)
46
46
47 class mergestate(object):
47 class mergestate(object):
48 '''track 3-way merge state of individual files
48 '''track 3-way merge state of individual files
49
49
50 The merge state is stored on disk when needed. Two files are used: one with
50 The merge state is stored on disk when needed. Two files are used: one with
51 an old format (version 1), and one with a new format (version 2). Version 2
51 an old format (version 1), and one with a new format (version 2). Version 2
52 stores a superset of the data in version 1, including new kinds of records
52 stores a superset of the data in version 1, including new kinds of records
53 in the future. For more about the new format, see the documentation for
53 in the future. For more about the new format, see the documentation for
54 `_readrecordsv2`.
54 `_readrecordsv2`.
55
55
56 Each record can contain arbitrary content, and has an associated type. This
56 Each record can contain arbitrary content, and has an associated type. This
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 versions of Mercurial that don't support it should abort. If `type` is
58 versions of Mercurial that don't support it should abort. If `type` is
59 lowercase, the record can be safely ignored.
59 lowercase, the record can be safely ignored.
60
60
61 Currently known records:
61 Currently known records:
62
62
63 L: the node of the "local" part of the merge (hexified version)
63 L: the node of the "local" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
65 F: a file to be merged entry
65 F: a file to be merged entry
66 C: a change/delete or delete/change conflict
66 C: a change/delete or delete/change conflict
67 D: a file that the external merge driver will merge internally
67 D: a file that the external merge driver will merge internally
68 (experimental)
68 (experimental)
69 P: a path conflict (file vs directory)
69 P: a path conflict (file vs directory)
70 m: the external merge driver defined for this merge plus its run state
70 m: the external merge driver defined for this merge plus its run state
71 (experimental)
71 (experimental)
72 f: a (filename, dictionary) tuple of optional values for a given file
72 f: a (filename, dictionary) tuple of optional values for a given file
73 X: unsupported mandatory record type (used in tests)
73 X: unsupported mandatory record type (used in tests)
74 x: unsupported advisory record type (used in tests)
74 x: unsupported advisory record type (used in tests)
75 l: the labels for the parts of the merge.
75 l: the labels for the parts of the merge.
76
76
77 Merge driver run states (experimental):
77 Merge driver run states (experimental):
78 u: driver-resolved files unmarked -- needs to be run next time we're about
78 u: driver-resolved files unmarked -- needs to be run next time we're about
79 to resolve or commit
79 to resolve or commit
80 m: driver-resolved files marked -- only needs to be run before commit
80 m: driver-resolved files marked -- only needs to be run before commit
81 s: success/skipped -- does not need to be run any more
81 s: success/skipped -- does not need to be run any more
82
82
83 Merge record states (stored in self._state, indexed by filename):
83 Merge record states (stored in self._state, indexed by filename):
84 u: unresolved conflict
84 u: unresolved conflict
85 r: resolved conflict
85 r: resolved conflict
86 pu: unresolved path conflict (file conflicts with directory)
86 pu: unresolved path conflict (file conflicts with directory)
87 pr: resolved path conflict
87 pr: resolved path conflict
88 d: driver-resolved conflict
88 d: driver-resolved conflict
89
89
90 The resolve command transitions between 'u' and 'r' for conflicts and
90 The resolve command transitions between 'u' and 'r' for conflicts and
91 'pu' and 'pr' for path conflicts.
91 'pu' and 'pr' for path conflicts.
92 '''
92 '''
93 statepathv1 = 'merge/state'
93 statepathv1 = 'merge/state'
94 statepathv2 = 'merge/state2'
94 statepathv2 = 'merge/state2'
95
95
96 @staticmethod
96 @staticmethod
97 def clean(repo, node=None, other=None, labels=None):
97 def clean(repo, node=None, other=None, labels=None):
98 """Initialize a brand new merge state, removing any existing state on
98 """Initialize a brand new merge state, removing any existing state on
99 disk."""
99 disk."""
100 ms = mergestate(repo)
100 ms = mergestate(repo)
101 ms.reset(node, other, labels)
101 ms.reset(node, other, labels)
102 return ms
102 return ms
103
103
104 @staticmethod
104 @staticmethod
105 def read(repo):
105 def read(repo):
106 """Initialize the merge state, reading it from disk."""
106 """Initialize the merge state, reading it from disk."""
107 ms = mergestate(repo)
107 ms = mergestate(repo)
108 ms._read()
108 ms._read()
109 return ms
109 return ms
110
110
111 def __init__(self, repo):
111 def __init__(self, repo):
112 """Initialize the merge state.
112 """Initialize the merge state.
113
113
114 Do not use this directly! Instead call read() or clean()."""
114 Do not use this directly! Instead call read() or clean()."""
115 self._repo = repo
115 self._repo = repo
116 self._dirty = False
116 self._dirty = False
117 self._labels = None
117 self._labels = None
118
118
119 def reset(self, node=None, other=None, labels=None):
119 def reset(self, node=None, other=None, labels=None):
120 self._state = {}
120 self._state = {}
121 self._stateextras = {}
121 self._stateextras = {}
122 self._local = None
122 self._local = None
123 self._other = None
123 self._other = None
124 self._labels = labels
124 self._labels = labels
125 for var in ('localctx', 'otherctx'):
125 for var in ('localctx', 'otherctx'):
126 if var in vars(self):
126 if var in vars(self):
127 delattr(self, var)
127 delattr(self, var)
128 if node:
128 if node:
129 self._local = node
129 self._local = node
130 self._other = other
130 self._other = other
131 self._readmergedriver = None
131 self._readmergedriver = None
132 if self.mergedriver:
132 if self.mergedriver:
133 self._mdstate = 's'
133 self._mdstate = 's'
134 else:
134 else:
135 self._mdstate = 'u'
135 self._mdstate = 'u'
136 shutil.rmtree(self._repo.vfs.join('merge'), True)
136 shutil.rmtree(self._repo.vfs.join('merge'), True)
137 self._results = {}
137 self._results = {}
138 self._dirty = False
138 self._dirty = False
139
139
140 def _read(self):
140 def _read(self):
141 """Analyse each record content to restore a serialized state from disk
141 """Analyse each record content to restore a serialized state from disk
142
142
143 This function process "record" entry produced by the de-serialization
143 This function process "record" entry produced by the de-serialization
144 of on disk file.
144 of on disk file.
145 """
145 """
146 self._state = {}
146 self._state = {}
147 self._stateextras = {}
147 self._stateextras = {}
148 self._local = None
148 self._local = None
149 self._other = None
149 self._other = None
150 for var in ('localctx', 'otherctx'):
150 for var in ('localctx', 'otherctx'):
151 if var in vars(self):
151 if var in vars(self):
152 delattr(self, var)
152 delattr(self, var)
153 self._readmergedriver = None
153 self._readmergedriver = None
154 self._mdstate = 's'
154 self._mdstate = 's'
155 unsupported = set()
155 unsupported = set()
156 records = self._readrecords()
156 records = self._readrecords()
157 for rtype, record in records:
157 for rtype, record in records:
158 if rtype == 'L':
158 if rtype == 'L':
159 self._local = bin(record)
159 self._local = bin(record)
160 elif rtype == 'O':
160 elif rtype == 'O':
161 self._other = bin(record)
161 self._other = bin(record)
162 elif rtype == 'm':
162 elif rtype == 'm':
163 bits = record.split('\0', 1)
163 bits = record.split('\0', 1)
164 mdstate = bits[1]
164 mdstate = bits[1]
165 if len(mdstate) != 1 or mdstate not in 'ums':
165 if len(mdstate) != 1 or mdstate not in 'ums':
166 # the merge driver should be idempotent, so just rerun it
166 # the merge driver should be idempotent, so just rerun it
167 mdstate = 'u'
167 mdstate = 'u'
168
168
169 self._readmergedriver = bits[0]
169 self._readmergedriver = bits[0]
170 self._mdstate = mdstate
170 self._mdstate = mdstate
171 elif rtype in 'FDCP':
171 elif rtype in 'FDCP':
172 bits = record.split('\0')
172 bits = record.split('\0')
173 self._state[bits[0]] = bits[1:]
173 self._state[bits[0]] = bits[1:]
174 elif rtype == 'f':
174 elif rtype == 'f':
175 filename, rawextras = record.split('\0', 1)
175 filename, rawextras = record.split('\0', 1)
176 extraparts = rawextras.split('\0')
176 extraparts = rawextras.split('\0')
177 extras = {}
177 extras = {}
178 i = 0
178 i = 0
179 while i < len(extraparts):
179 while i < len(extraparts):
180 extras[extraparts[i]] = extraparts[i + 1]
180 extras[extraparts[i]] = extraparts[i + 1]
181 i += 2
181 i += 2
182
182
183 self._stateextras[filename] = extras
183 self._stateextras[filename] = extras
184 elif rtype == 'l':
184 elif rtype == 'l':
185 labels = record.split('\0', 2)
185 labels = record.split('\0', 2)
186 self._labels = [l for l in labels if len(l) > 0]
186 self._labels = [l for l in labels if len(l) > 0]
187 elif not rtype.islower():
187 elif not rtype.islower():
188 unsupported.add(rtype)
188 unsupported.add(rtype)
189 self._results = {}
189 self._results = {}
190 self._dirty = False
190 self._dirty = False
191
191
192 if unsupported:
192 if unsupported:
193 raise error.UnsupportedMergeRecords(unsupported)
193 raise error.UnsupportedMergeRecords(unsupported)
194
194
195 def _readrecords(self):
195 def _readrecords(self):
196 """Read merge state from disk and return a list of record (TYPE, data)
196 """Read merge state from disk and return a list of record (TYPE, data)
197
197
198 We read data from both v1 and v2 files and decide which one to use.
198 We read data from both v1 and v2 files and decide which one to use.
199
199
200 V1 has been used by version prior to 2.9.1 and contains less data than
200 V1 has been used by version prior to 2.9.1 and contains less data than
201 v2. We read both versions and check if no data in v2 contradicts
201 v2. We read both versions and check if no data in v2 contradicts
202 v1. If there is not contradiction we can safely assume that both v1
202 v1. If there is not contradiction we can safely assume that both v1
203 and v2 were written at the same time and use the extract data in v2. If
203 and v2 were written at the same time and use the extract data in v2. If
204 there is contradiction we ignore v2 content as we assume an old version
204 there is contradiction we ignore v2 content as we assume an old version
205 of Mercurial has overwritten the mergestate file and left an old v2
205 of Mercurial has overwritten the mergestate file and left an old v2
206 file around.
206 file around.
207
207
208 returns list of record [(TYPE, data), ...]"""
208 returns list of record [(TYPE, data), ...]"""
209 v1records = self._readrecordsv1()
209 v1records = self._readrecordsv1()
210 v2records = self._readrecordsv2()
210 v2records = self._readrecordsv2()
211 if self._v1v2match(v1records, v2records):
211 if self._v1v2match(v1records, v2records):
212 return v2records
212 return v2records
213 else:
213 else:
214 # v1 file is newer than v2 file, use it
214 # v1 file is newer than v2 file, use it
215 # we have to infer the "other" changeset of the merge
215 # we have to infer the "other" changeset of the merge
216 # we cannot do better than that with v1 of the format
216 # we cannot do better than that with v1 of the format
217 mctx = self._repo[None].parents()[-1]
217 mctx = self._repo[None].parents()[-1]
218 v1records.append(('O', mctx.hex()))
218 v1records.append(('O', mctx.hex()))
219 # add place holder "other" file node information
219 # add place holder "other" file node information
220 # nobody is using it yet so we do no need to fetch the data
220 # nobody is using it yet so we do no need to fetch the data
221 # if mctx was wrong `mctx[bits[-2]]` may fails.
221 # if mctx was wrong `mctx[bits[-2]]` may fails.
222 for idx, r in enumerate(v1records):
222 for idx, r in enumerate(v1records):
223 if r[0] == 'F':
223 if r[0] == 'F':
224 bits = r[1].split('\0')
224 bits = r[1].split('\0')
225 bits.insert(-2, '')
225 bits.insert(-2, '')
226 v1records[idx] = (r[0], '\0'.join(bits))
226 v1records[idx] = (r[0], '\0'.join(bits))
227 return v1records
227 return v1records
228
228
229 def _v1v2match(self, v1records, v2records):
229 def _v1v2match(self, v1records, v2records):
230 oldv2 = set() # old format version of v2 record
230 oldv2 = set() # old format version of v2 record
231 for rec in v2records:
231 for rec in v2records:
232 if rec[0] == 'L':
232 if rec[0] == 'L':
233 oldv2.add(rec)
233 oldv2.add(rec)
234 elif rec[0] == 'F':
234 elif rec[0] == 'F':
235 # drop the onode data (not contained in v1)
235 # drop the onode data (not contained in v1)
236 oldv2.add(('F', _droponode(rec[1])))
236 oldv2.add(('F', _droponode(rec[1])))
237 for rec in v1records:
237 for rec in v1records:
238 if rec not in oldv2:
238 if rec not in oldv2:
239 return False
239 return False
240 else:
240 else:
241 return True
241 return True
242
242
243 def _readrecordsv1(self):
243 def _readrecordsv1(self):
244 """read on disk merge state for version 1 file
244 """read on disk merge state for version 1 file
245
245
246 returns list of record [(TYPE, data), ...]
246 returns list of record [(TYPE, data), ...]
247
247
248 Note: the "F" data from this file are one entry short
248 Note: the "F" data from this file are one entry short
249 (no "other file node" entry)
249 (no "other file node" entry)
250 """
250 """
251 records = []
251 records = []
252 try:
252 try:
253 f = self._repo.vfs(self.statepathv1)
253 f = self._repo.vfs(self.statepathv1)
254 for i, l in enumerate(f):
254 for i, l in enumerate(f):
255 if i == 0:
255 if i == 0:
256 records.append(('L', l[:-1]))
256 records.append(('L', l[:-1]))
257 else:
257 else:
258 records.append(('F', l[:-1]))
258 records.append(('F', l[:-1]))
259 f.close()
259 f.close()
260 except IOError as err:
260 except IOError as err:
261 if err.errno != errno.ENOENT:
261 if err.errno != errno.ENOENT:
262 raise
262 raise
263 return records
263 return records
264
264
265 def _readrecordsv2(self):
265 def _readrecordsv2(self):
266 """read on disk merge state for version 2 file
266 """read on disk merge state for version 2 file
267
267
268 This format is a list of arbitrary records of the form:
268 This format is a list of arbitrary records of the form:
269
269
270 [type][length][content]
270 [type][length][content]
271
271
272 `type` is a single character, `length` is a 4 byte integer, and
272 `type` is a single character, `length` is a 4 byte integer, and
273 `content` is an arbitrary byte sequence of length `length`.
273 `content` is an arbitrary byte sequence of length `length`.
274
274
275 Mercurial versions prior to 3.7 have a bug where if there are
275 Mercurial versions prior to 3.7 have a bug where if there are
276 unsupported mandatory merge records, attempting to clear out the merge
276 unsupported mandatory merge records, attempting to clear out the merge
277 state with hg update --clean or similar aborts. The 't' record type
277 state with hg update --clean or similar aborts. The 't' record type
278 works around that by writing out what those versions treat as an
278 works around that by writing out what those versions treat as an
279 advisory record, but later versions interpret as special: the first
279 advisory record, but later versions interpret as special: the first
280 character is the 'real' record type and everything onwards is the data.
280 character is the 'real' record type and everything onwards is the data.
281
281
282 Returns list of records [(TYPE, data), ...]."""
282 Returns list of records [(TYPE, data), ...]."""
283 records = []
283 records = []
284 try:
284 try:
285 f = self._repo.vfs(self.statepathv2)
285 f = self._repo.vfs(self.statepathv2)
286 data = f.read()
286 data = f.read()
287 off = 0
287 off = 0
288 end = len(data)
288 end = len(data)
289 while off < end:
289 while off < end:
290 rtype = data[off]
290 rtype = data[off]
291 off += 1
291 off += 1
292 length = _unpack('>I', data[off:(off + 4)])[0]
292 length = _unpack('>I', data[off:(off + 4)])[0]
293 off += 4
293 off += 4
294 record = data[off:(off + length)]
294 record = data[off:(off + length)]
295 off += length
295 off += length
296 if rtype == 't':
296 if rtype == 't':
297 rtype, record = record[0], record[1:]
297 rtype, record = record[0], record[1:]
298 records.append((rtype, record))
298 records.append((rtype, record))
299 f.close()
299 f.close()
300 except IOError as err:
300 except IOError as err:
301 if err.errno != errno.ENOENT:
301 if err.errno != errno.ENOENT:
302 raise
302 raise
303 return records
303 return records
304
304
305 @util.propertycache
305 @util.propertycache
306 def mergedriver(self):
306 def mergedriver(self):
307 # protect against the following:
307 # protect against the following:
308 # - A configures a malicious merge driver in their hgrc, then
308 # - A configures a malicious merge driver in their hgrc, then
309 # pauses the merge
309 # pauses the merge
310 # - A edits their hgrc to remove references to the merge driver
310 # - A edits their hgrc to remove references to the merge driver
311 # - A gives a copy of their entire repo, including .hg, to B
311 # - A gives a copy of their entire repo, including .hg, to B
312 # - B inspects .hgrc and finds it to be clean
312 # - B inspects .hgrc and finds it to be clean
313 # - B then continues the merge and the malicious merge driver
313 # - B then continues the merge and the malicious merge driver
314 # gets invoked
314 # gets invoked
315 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
315 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
316 if (self._readmergedriver is not None
316 if (self._readmergedriver is not None
317 and self._readmergedriver != configmergedriver):
317 and self._readmergedriver != configmergedriver):
318 raise error.ConfigError(
318 raise error.ConfigError(
319 _("merge driver changed since merge started"),
319 _("merge driver changed since merge started"),
320 hint=_("revert merge driver change or abort merge"))
320 hint=_("revert merge driver change or abort merge"))
321
321
322 return configmergedriver
322 return configmergedriver
323
323
324 @util.propertycache
324 @util.propertycache
325 def localctx(self):
325 def localctx(self):
326 if self._local is None:
326 if self._local is None:
327 msg = "localctx accessed but self._local isn't set"
327 msg = "localctx accessed but self._local isn't set"
328 raise error.ProgrammingError(msg)
328 raise error.ProgrammingError(msg)
329 return self._repo[self._local]
329 return self._repo[self._local]
330
330
331 @util.propertycache
331 @util.propertycache
332 def otherctx(self):
332 def otherctx(self):
333 if self._other is None:
333 if self._other is None:
334 msg = "otherctx accessed but self._other isn't set"
334 msg = "otherctx accessed but self._other isn't set"
335 raise error.ProgrammingError(msg)
335 raise error.ProgrammingError(msg)
336 return self._repo[self._other]
336 return self._repo[self._other]
337
337
338 def active(self):
338 def active(self):
339 """Whether mergestate is active.
339 """Whether mergestate is active.
340
340
341 Returns True if there appears to be mergestate. This is a rough proxy
341 Returns True if there appears to be mergestate. This is a rough proxy
342 for "is a merge in progress."
342 for "is a merge in progress."
343 """
343 """
344 # Check local variables before looking at filesystem for performance
344 # Check local variables before looking at filesystem for performance
345 # reasons.
345 # reasons.
346 return bool(self._local) or bool(self._state) or \
346 return bool(self._local) or bool(self._state) or \
347 self._repo.vfs.exists(self.statepathv1) or \
347 self._repo.vfs.exists(self.statepathv1) or \
348 self._repo.vfs.exists(self.statepathv2)
348 self._repo.vfs.exists(self.statepathv2)
349
349
350 def commit(self):
350 def commit(self):
351 """Write current state on disk (if necessary)"""
351 """Write current state on disk (if necessary)"""
352 if self._dirty:
352 if self._dirty:
353 records = self._makerecords()
353 records = self._makerecords()
354 self._writerecords(records)
354 self._writerecords(records)
355 self._dirty = False
355 self._dirty = False
356
356
357 def _makerecords(self):
357 def _makerecords(self):
358 records = []
358 records = []
359 records.append(('L', hex(self._local)))
359 records.append(('L', hex(self._local)))
360 records.append(('O', hex(self._other)))
360 records.append(('O', hex(self._other)))
361 if self.mergedriver:
361 if self.mergedriver:
362 records.append(('m', '\0'.join([
362 records.append(('m', '\0'.join([
363 self.mergedriver, self._mdstate])))
363 self.mergedriver, self._mdstate])))
364 for d, v in self._state.iteritems():
364 for d, v in self._state.iteritems():
365 if v[0] == 'd':
365 if v[0] == 'd':
366 records.append(('D', '\0'.join([d] + v)))
366 records.append(('D', '\0'.join([d] + v)))
367 elif v[0] in ('pu', 'pr'):
367 elif v[0] in ('pu', 'pr'):
368 records.append(('P', '\0'.join([d] + v)))
368 records.append(('P', '\0'.join([d] + v)))
369 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
369 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
370 # older versions of Mercurial
370 # older versions of Mercurial
371 elif v[1] == nullhex or v[6] == nullhex:
371 elif v[1] == nullhex or v[6] == nullhex:
372 records.append(('C', '\0'.join([d] + v)))
372 records.append(('C', '\0'.join([d] + v)))
373 else:
373 else:
374 records.append(('F', '\0'.join([d] + v)))
374 records.append(('F', '\0'.join([d] + v)))
375 for filename, extras in sorted(self._stateextras.iteritems()):
375 for filename, extras in sorted(self._stateextras.iteritems()):
376 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
376 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
377 extras.iteritems())
377 extras.iteritems())
378 records.append(('f', '%s\0%s' % (filename, rawextras)))
378 records.append(('f', '%s\0%s' % (filename, rawextras)))
379 if self._labels is not None:
379 if self._labels is not None:
380 labels = '\0'.join(self._labels)
380 labels = '\0'.join(self._labels)
381 records.append(('l', labels))
381 records.append(('l', labels))
382 return records
382 return records
383
383
384 def _writerecords(self, records):
384 def _writerecords(self, records):
385 """Write current state on disk (both v1 and v2)"""
385 """Write current state on disk (both v1 and v2)"""
386 self._writerecordsv1(records)
386 self._writerecordsv1(records)
387 self._writerecordsv2(records)
387 self._writerecordsv2(records)
388
388
389 def _writerecordsv1(self, records):
389 def _writerecordsv1(self, records):
390 """Write current state on disk in a version 1 file"""
390 """Write current state on disk in a version 1 file"""
391 f = self._repo.vfs(self.statepathv1, 'w')
391 f = self._repo.vfs(self.statepathv1, 'w')
392 irecords = iter(records)
392 irecords = iter(records)
393 lrecords = next(irecords)
393 lrecords = next(irecords)
394 assert lrecords[0] == 'L'
394 assert lrecords[0] == 'L'
395 f.write(hex(self._local) + '\n')
395 f.write(hex(self._local) + '\n')
396 for rtype, data in irecords:
396 for rtype, data in irecords:
397 if rtype == 'F':
397 if rtype == 'F':
398 f.write('%s\n' % _droponode(data))
398 f.write('%s\n' % _droponode(data))
399 f.close()
399 f.close()
400
400
401 def _writerecordsv2(self, records):
401 def _writerecordsv2(self, records):
402 """Write current state on disk in a version 2 file
402 """Write current state on disk in a version 2 file
403
403
404 See the docstring for _readrecordsv2 for why we use 't'."""
404 See the docstring for _readrecordsv2 for why we use 't'."""
405 # these are the records that all version 2 clients can read
405 # these are the records that all version 2 clients can read
406 whitelist = 'LOF'
406 whitelist = 'LOF'
407 f = self._repo.vfs(self.statepathv2, 'w')
407 f = self._repo.vfs(self.statepathv2, 'w')
408 for key, data in records:
408 for key, data in records:
409 assert len(key) == 1
409 assert len(key) == 1
410 if key not in whitelist:
410 if key not in whitelist:
411 key, data = 't', '%s%s' % (key, data)
411 key, data = 't', '%s%s' % (key, data)
412 format = '>sI%is' % len(data)
412 format = '>sI%is' % len(data)
413 f.write(_pack(format, key, len(data), data))
413 f.write(_pack(format, key, len(data), data))
414 f.close()
414 f.close()
415
415
416 def add(self, fcl, fco, fca, fd):
416 def add(self, fcl, fco, fca, fd):
417 """add a new (potentially?) conflicting file the merge state
417 """add a new (potentially?) conflicting file the merge state
418 fcl: file context for local,
418 fcl: file context for local,
419 fco: file context for remote,
419 fco: file context for remote,
420 fca: file context for ancestors,
420 fca: file context for ancestors,
421 fd: file path of the resulting merge.
421 fd: file path of the resulting merge.
422
422
423 note: also write the local version to the `.hg/merge` directory.
423 note: also write the local version to the `.hg/merge` directory.
424 """
424 """
425 if fcl.isabsent():
425 if fcl.isabsent():
426 hash = nullhex
426 hash = nullhex
427 else:
427 else:
428 hash = hex(hashlib.sha1(fcl.path()).digest())
428 hash = hex(hashlib.sha1(fcl.path()).digest())
429 self._repo.vfs.write('merge/' + hash, fcl.data())
429 self._repo.vfs.write('merge/' + hash, fcl.data())
430 self._state[fd] = ['u', hash, fcl.path(),
430 self._state[fd] = ['u', hash, fcl.path(),
431 fca.path(), hex(fca.filenode()),
431 fca.path(), hex(fca.filenode()),
432 fco.path(), hex(fco.filenode()),
432 fco.path(), hex(fco.filenode()),
433 fcl.flags()]
433 fcl.flags()]
434 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
434 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
435 self._dirty = True
435 self._dirty = True
436
436
437 def addpath(self, path, frename, forigin):
437 def addpath(self, path, frename, forigin):
438 """add a new conflicting path to the merge state
438 """add a new conflicting path to the merge state
439 path: the path that conflicts
439 path: the path that conflicts
440 frename: the filename the conflicting file was renamed to
440 frename: the filename the conflicting file was renamed to
441 forigin: origin of the file ('l' or 'r' for local/remote)
441 forigin: origin of the file ('l' or 'r' for local/remote)
442 """
442 """
443 self._state[path] = ['pu', frename, forigin]
443 self._state[path] = ['pu', frename, forigin]
444 self._dirty = True
444 self._dirty = True
445
445
446 def __contains__(self, dfile):
446 def __contains__(self, dfile):
447 return dfile in self._state
447 return dfile in self._state
448
448
449 def __getitem__(self, dfile):
449 def __getitem__(self, dfile):
450 return self._state[dfile][0]
450 return self._state[dfile][0]
451
451
452 def __iter__(self):
452 def __iter__(self):
453 return iter(sorted(self._state))
453 return iter(sorted(self._state))
454
454
455 def files(self):
455 def files(self):
456 return self._state.keys()
456 return self._state.keys()
457
457
458 def mark(self, dfile, state):
458 def mark(self, dfile, state):
459 self._state[dfile][0] = state
459 self._state[dfile][0] = state
460 self._dirty = True
460 self._dirty = True
461
461
462 def mdstate(self):
462 def mdstate(self):
463 return self._mdstate
463 return self._mdstate
464
464
465 def unresolved(self):
465 def unresolved(self):
466 """Obtain the paths of unresolved files."""
466 """Obtain the paths of unresolved files."""
467
467
468 for f, entry in self._state.iteritems():
468 for f, entry in self._state.iteritems():
469 if entry[0] in ('u', 'pu'):
469 if entry[0] in ('u', 'pu'):
470 yield f
470 yield f
471
471
472 def driverresolved(self):
472 def driverresolved(self):
473 """Obtain the paths of driver-resolved files."""
473 """Obtain the paths of driver-resolved files."""
474
474
475 for f, entry in self._state.items():
475 for f, entry in self._state.items():
476 if entry[0] == 'd':
476 if entry[0] == 'd':
477 yield f
477 yield f
478
478
479 def extras(self, filename):
479 def extras(self, filename):
480 return self._stateextras.setdefault(filename, {})
480 return self._stateextras.setdefault(filename, {})
481
481
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        When ``preresolve`` is True, the local copy of the file is first
        restored from the merge store and filemerge.premerge is run;
        otherwise filemerge.filemerge performs the real merge.

        Returns a (complete, exitcode) pair.
        """
        # 'r' (resolved) and 'd' (driver-resolved) entries need no work
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        # ancestorlinknode pins the changeset the ancestor file came from,
        # when it was recorded at merge time
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        # local/other sides may be absent (delete on one side)
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags: merge the executable bit separately, since
        # file content merging does not handle it (symlinks excluded)
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                # no common ancestor for the flags: cannot 3-way merge them
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                # local flags unchanged from ancestor: take the other side's
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                # unmodified local content was stashed under merge/<hash>
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                # file was absent locally at merge time
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            # exit code 0: successful merge
            self.mark(dfile, 'r')

        if complete:
            # record a follow-up dirstate action for recordactions()
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
558
558
559 def _filectxorabsent(self, hexnode, ctx, f):
559 def _filectxorabsent(self, hexnode, ctx, f):
560 if hexnode == nullhex:
560 if hexnode == nullhex:
561 return filemerge.absentfilectx(ctx, f)
561 return filemerge.absentfilectx(ctx, f)
562 else:
562 else:
563 return ctx[f]
563 return ctx[f]
564
564
565 def preresolve(self, dfile, wctx):
565 def preresolve(self, dfile, wctx):
566 """run premerge process for dfile
566 """run premerge process for dfile
567
567
568 Returns whether the merge is complete, and the exit code."""
568 Returns whether the merge is complete, and the exit code."""
569 return self._resolve(True, dfile, wctx)
569 return self._resolve(True, dfile, wctx)
570
570
571 def resolve(self, dfile, wctx):
571 def resolve(self, dfile, wctx):
572 """run merge process (assuming premerge was run) for dfile
572 """run merge process (assuming premerge was run) for dfile
573
573
574 Returns the exit code of the merge."""
574 Returns the exit code of the merge."""
575 return self._resolve(False, dfile, wctx)[1]
575 return self._resolve(False, dfile, wctx)[1]
576
576
577 def counts(self):
577 def counts(self):
578 """return counts for updated, merged and removed files in this
578 """return counts for updated, merged and removed files in this
579 session"""
579 session"""
580 updated, merged, removed = 0, 0, 0
580 updated, merged, removed = 0, 0, 0
581 for r, action in self._results.itervalues():
581 for r, action in self._results.itervalues():
582 if r is None:
582 if r is None:
583 updated += 1
583 updated += 1
584 elif r == 0:
584 elif r == 0:
585 if action == 'r':
585 if action == 'r':
586 removed += 1
586 removed += 1
587 else:
587 else:
588 merged += 1
588 merged += 1
589 return updated, merged, removed
589 return updated, merged, removed
590
590
591 def unresolvedcount(self):
591 def unresolvedcount(self):
592 """get unresolved count for this merge (persistent)"""
592 """get unresolved count for this merge (persistent)"""
593 return len(list(self.unresolved()))
593 return len(list(self.unresolved()))
594
594
595 def actions(self):
595 def actions(self):
596 """return lists of actions to perform on the dirstate"""
596 """return lists of actions to perform on the dirstate"""
597 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
597 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
598 for f, (r, action) in self._results.iteritems():
598 for f, (r, action) in self._results.iteritems():
599 if action is not None:
599 if action is not None:
600 actions[action].append((f, None, "merge result"))
600 actions[action].append((f, None, "merge result"))
601 return actions
601 return actions
602
602
603 def recordactions(self):
603 def recordactions(self):
604 """record remove/add/get actions in the dirstate"""
604 """record remove/add/get actions in the dirstate"""
605 branchmerge = self._repo.dirstate.p2() != nullid
605 branchmerge = self._repo.dirstate.p2() != nullid
606 recordupdates(self._repo, self.actions(), branchmerge)
606 recordupdates(self._repo, self.actions(), branchmerge)
607
607
608 def queueremove(self, f):
608 def queueremove(self, f):
609 """queues a file to be removed from the dirstate
609 """queues a file to be removed from the dirstate
610
610
611 Meant for use by custom merge drivers."""
611 Meant for use by custom merge drivers."""
612 self._results[f] = 0, 'r'
612 self._results[f] = 0, 'r'
613
613
614 def queueadd(self, f):
614 def queueadd(self, f):
615 """queues a file to be added to the dirstate
615 """queues a file to be added to the dirstate
616
616
617 Meant for use by custom merge drivers."""
617 Meant for use by custom merge drivers."""
618 self._results[f] = 0, 'a'
618 self._results[f] = 0, 'a'
619
619
620 def queueget(self, f):
620 def queueget(self, f):
621 """queues a file to be marked modified in the dirstate
621 """queues a file to be marked modified in the dirstate
622
622
623 Meant for use by custom merge drivers."""
623 Meant for use by custom merge drivers."""
624 self._results[f] = 0, 'g'
624 self._results[f] = 0, 'g'
625
625
626 def _getcheckunknownconfig(repo, section, name):
626 def _getcheckunknownconfig(repo, section, name):
627 config = repo.ui.config(section, name)
627 config = repo.ui.config(section, name)
628 valid = ['abort', 'ignore', 'warn']
628 valid = ['abort', 'ignore', 'warn']
629 if config not in valid:
629 if config not in valid:
630 validstr = ', '.join(["'" + v + "'" for v in valid])
630 validstr = ', '.join(["'" + v + "'" for v in valid])
631 raise error.ConfigError(_("%s.%s not valid "
631 raise error.ConfigError(_("%s.%s not valid "
632 "('%s' is none of %s)")
632 "('%s' is none of %s)")
633 % (section, name, config, validstr))
633 % (section, name, config, validstr))
634 return config
634 return config
635
635
636 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
636 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
637 if f2 is None:
637 if f2 is None:
638 f2 = f
638 f2 = f
639 return (repo.wvfs.audit.check(f)
639 return (repo.wvfs.audit.check(f)
640 and repo.wvfs.isfileorlink(f)
640 and repo.wvfs.isfileorlink(f)
641 and repo.dirstate.normalize(f) not in repo.dirstate
641 and repo.dirstate.normalize(f) not in repo.dirstate
642 and mctx[f2].cmp(wctx[f]))
642 and mctx[f2].cmp(wctx[f]))
643
643
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    ``actions`` is mutated in place; may raise error.Abort when the
    merge.checkunknown / merge.checkignored config says so.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            # route each conflict set to abort/warn depending on config;
            # 'ignore' means neither
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        # gather files whose creating actions would clobber a differing
        # untracked file in the working directory
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                # NOTE(review): the following branch looks unreachable --
                # config == 'abort' is already consumed by the preceding
                # elif; see table note (1) above. Confirm before removing.
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # rewrite remaining 'c' (create) actions as 'g' (get), carrying a
    # backup flag for files that conflicted with untracked content
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
723
723
724 def _forgetremoved(wctx, mctx, branchmerge):
724 def _forgetremoved(wctx, mctx, branchmerge):
725 """
725 """
726 Forget removed files
726 Forget removed files
727
727
728 If we're jumping between revisions (as opposed to merging), and if
728 If we're jumping between revisions (as opposed to merging), and if
729 neither the working directory nor the target rev has the file,
729 neither the working directory nor the target rev has the file,
730 then we need to remove it from the dirstate, to prevent the
730 then we need to remove it from the dirstate, to prevent the
731 dirstate from listing the file when it is no longer in the
731 dirstate from listing the file when it is no longer in the
732 manifest.
732 manifest.
733
733
734 If we're merging, and the other revision has removed a file
734 If we're merging, and the other revision has removed a file
735 that is not present in the working directory, we need to mark it
735 that is not present in the working directory, we need to mark it
736 as removed.
736 as removed.
737 """
737 """
738
738
739 actions = {}
739 actions = {}
740 m = 'f'
740 m = 'f'
741 if branchmerge:
741 if branchmerge:
742 m = 'r'
742 m = 'r'
743 for f in wctx.deleted():
743 for f in wctx.deleted():
744 if f not in mctx:
744 if f not in mctx:
745 actions[f] = m, None, "forget deleted"
745 actions[f] = m, None, "forget deleted"
746
746
747 if not branchmerge:
747 if not branchmerge:
748 for f in wctx.removed():
748 for f in wctx.removed():
749 if f not in mctx:
749 if f not in mctx:
750 actions[f] = 'f', None, "forget removed"
750 actions[f] = 'f', None, "forget removed"
751
751
752 return actions
752 return actions
753
753
def _checkcollision(repo, wmf, actions):
    """Abort if applying ``actions`` on top of manifest ``wmf`` would
    create a case-folding collision between two files, or between a file
    and a directory."""
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            # directory-rename move: source disappears, target appears
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories: walk folded names in sorted order
    # so a directory immediately precedes its contents
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
796
796
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the preprocess step of the merge driver, if any.

    Currently a no-op extension point; always reports success."""
    return True
802
802
def driverconclude(repo, ms, wctx, labels=None):
    """Run the conclude step of the merge driver, if any.

    Currently a no-op extension point; always reports success."""
    return True
808
808
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns (actions, diverge, renamedelete), where ``actions`` maps each
    affected path to an (actioncode, args, reason) tuple.
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    # files that are the destination of a copy or a directory move: their
    # source-side entries are handled when the destination side is visited
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    # diff values are ((localnode, localflags), (remotenode, remoteflags));
    # a None node means the file is absent on that side
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink involved, so flag-only changes mergeable
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
985
985
986 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
986 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
987 """Resolves false conflicts where the nodeid changed but the content
987 """Resolves false conflicts where the nodeid changed but the content
988 remained the same."""
988 remained the same."""
989
989
990 for f, (m, args, msg) in actions.items():
990 for f, (m, args, msg) in actions.items():
991 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
991 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
992 # local did change but ended up with same content
992 # local did change but ended up with same content
993 actions[f] = 'r', None, "prompt same"
993 actions[f] = 'r', None, "prompt same"
994 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
994 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
995 # remote did change but ended up with same content
995 # remote did change but ended up with same content
996 del actions[f] # don't get = keep local deleted
996 del actions[f] # don't get = keep local deleted
997
997
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a straight manifestmerge().  With several
    ancestors (merge.preferancestor=*), each ancestor produces a "bid" of
    actions per file and an auction picks the best bid for each file.

    Returns a tuple (actions, diverge, renamedelete) where actions is a
    dict of filename -> (method, args, message) filtered through sparse.
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # Record this ancestor's proposed action for each file.
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note('  %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working directory update: forget files removed on the other side
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    # Drop actions outside the sparse checkout, if any.
    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete
1108
1108
def _getcwd():
    """Return the current working directory, or None if it has been removed.

    Any OSError other than ENOENT is propagated to the caller.
    """
    try:
        return pycompat.getcwd()
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        return None
1116
1116
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates

    Each yielded tuple is (count, lastfile); removals are batched so that
    progress is reported roughly every 100 files rather than per file.
    Failures to remove a file are reported as warnings, not raised.
    """
    verbose = repo.ui.verbose
    # Remember cwd so we can warn if the removals delete it out from under us.
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            # best-effort: report and keep going with the remaining files
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        # flush the remaining (partial) batch of progress
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1151
1151
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates

    Each action carries (flags, backup): when backup is true the existing
    working-copy file is preserved as an .orig file before being
    overwritten.  Progress tuples (count, lastfile) are yielded roughly
    every 100 files.
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # backgroundclosing lets file closes happen off the main loop.
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # move the current file aside as an .orig backup before
                # clobbering it with the merged-in content
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    # the file may already be gone; only real errors propagate
                    if e.errno != errno.ENOENT:
                        raise
            wctx[f].clearunknown()
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        # flush the remaining (partial) batch of progress
        yield i, f

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1190
1190
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    actions maps an action method ('r', 'g', 'm', 'p', ...) to the list of
    (file, args, message) tuples to perform for that method.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # z counts processed files for progress; 'k' actions report no progress.
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
    z = 0

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts ('p': a file or link collides with a directory;
    # one side has been renamed and the conflict is left for hg resolve)
    for f, args, msg in actions['p']:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        # record the conflict in the merge state so resolve can find it
        ms.addpath(f, f1, fo)
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # remove in parallel (must come before getting)
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        wctx[f].audit()
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            # the file needs a real merge pass; count it toward progress
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1417
1432
1418 def recordupdates(repo, actions, branchmerge):
1433 def recordupdates(repo, actions, branchmerge):
1419 "record merge actions to the dirstate"
1434 "record merge actions to the dirstate"
1420 # remove (must come first)
1435 # remove (must come first)
1421 for f, args, msg in actions.get('r', []):
1436 for f, args, msg in actions.get('r', []):
1422 if branchmerge:
1437 if branchmerge:
1423 repo.dirstate.remove(f)
1438 repo.dirstate.remove(f)
1424 else:
1439 else:
1425 repo.dirstate.drop(f)
1440 repo.dirstate.drop(f)
1426
1441
1427 # forget (must come first)
1442 # forget (must come first)
1428 for f, args, msg in actions.get('f', []):
1443 for f, args, msg in actions.get('f', []):
1429 repo.dirstate.drop(f)
1444 repo.dirstate.drop(f)
1430
1445
1431 # re-add
1446 # re-add
1432 for f, args, msg in actions.get('a', []):
1447 for f, args, msg in actions.get('a', []):
1433 repo.dirstate.add(f)
1448 repo.dirstate.add(f)
1434
1449
1435 # re-add/mark as modified
1450 # re-add/mark as modified
1436 for f, args, msg in actions.get('am', []):
1451 for f, args, msg in actions.get('am', []):
1437 if branchmerge:
1452 if branchmerge:
1438 repo.dirstate.normallookup(f)
1453 repo.dirstate.normallookup(f)
1439 else:
1454 else:
1440 repo.dirstate.add(f)
1455 repo.dirstate.add(f)
1441
1456
1442 # exec change
1457 # exec change
1443 for f, args, msg in actions.get('e', []):
1458 for f, args, msg in actions.get('e', []):
1444 repo.dirstate.normallookup(f)
1459 repo.dirstate.normallookup(f)
1445
1460
1446 # keep
1461 # keep
1447 for f, args, msg in actions.get('k', []):
1462 for f, args, msg in actions.get('k', []):
1448 pass
1463 pass
1449
1464
1450 # get
1465 # get
1451 for f, args, msg in actions.get('g', []):
1466 for f, args, msg in actions.get('g', []):
1452 if branchmerge:
1467 if branchmerge:
1453 repo.dirstate.otherparent(f)
1468 repo.dirstate.otherparent(f)
1454 else:
1469 else:
1455 repo.dirstate.normal(f)
1470 repo.dirstate.normal(f)
1456
1471
1457 # merge
1472 # merge
1458 for f, args, msg in actions.get('m', []):
1473 for f, args, msg in actions.get('m', []):
1459 f1, f2, fa, move, anc = args
1474 f1, f2, fa, move, anc = args
1460 if branchmerge:
1475 if branchmerge:
1461 # We've done a branch merge, mark this file as merged
1476 # We've done a branch merge, mark this file as merged
1462 # so that we properly record the merger later
1477 # so that we properly record the merger later
1463 repo.dirstate.merge(f)
1478 repo.dirstate.merge(f)
1464 if f1 != f2: # copy/rename
1479 if f1 != f2: # copy/rename
1465 if move:
1480 if move:
1466 repo.dirstate.remove(f1)
1481 repo.dirstate.remove(f1)
1467 if f1 != f:
1482 if f1 != f:
1468 repo.dirstate.copy(f1, f)
1483 repo.dirstate.copy(f1, f)
1469 else:
1484 else:
1470 repo.dirstate.copy(f2, f)
1485 repo.dirstate.copy(f2, f)
1471 else:
1486 else:
1472 # We've update-merged a locally modified file, so
1487 # We've update-merged a locally modified file, so
1473 # we set the dirstate to emulate a normal checkout
1488 # we set the dirstate to emulate a normal checkout
1474 # of that file some time in the past. Thus our
1489 # of that file some time in the past. Thus our
1475 # merge will appear as a normal local file
1490 # merge will appear as a normal local file
1476 # modification.
1491 # modification.
1477 if f2 == f: # file not locally copied/moved
1492 if f2 == f: # file not locally copied/moved
1478 repo.dirstate.normallookup(f)
1493 repo.dirstate.normallookup(f)
1479 if move:
1494 if move:
1480 repo.dirstate.drop(f1)
1495 repo.dirstate.drop(f1)
1481
1496
1482 # directory rename, move local
1497 # directory rename, move local
1483 for f, args, msg in actions.get('dm', []):
1498 for f, args, msg in actions.get('dm', []):
1484 f0, flag = args
1499 f0, flag = args
1485 if branchmerge:
1500 if branchmerge:
1486 repo.dirstate.add(f)
1501 repo.dirstate.add(f)
1487 repo.dirstate.remove(f0)
1502 repo.dirstate.remove(f0)
1488 repo.dirstate.copy(f0, f)
1503 repo.dirstate.copy(f0, f)
1489 else:
1504 else:
1490 repo.dirstate.normal(f)
1505 repo.dirstate.normal(f)
1491 repo.dirstate.drop(f0)
1506 repo.dirstate.drop(f0)
1492
1507
1493 # directory rename, get
1508 # directory rename, get
1494 for f, args, msg in actions.get('dg', []):
1509 for f, args, msg in actions.get('dg', []):
1495 f0, flag = args
1510 f0, flag = args
1496 if branchmerge:
1511 if branchmerge:
1497 repo.dirstate.add(f)
1512 repo.dirstate.add(f)
1498 repo.dirstate.copy(f0, f)
1513 repo.dirstate.copy(f0, f)
1499 else:
1514 else:
1500 repo.dirstate.normal(f)
1515 repo.dirstate.normal(f)
1501
1516
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first. The
    -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        # pas holds the candidate merge ancestor(s); None until resolved below
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            # with --clean the working copy itself serves as the ancestor,
            # so every file is simply taken from the target revision
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            # any action other than get/keep/exec-change/remove implies a
            # conflict, which 'update -n' refuses to attempt
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1758
1774
1759 def graft(repo, ctx, pctx, labels, keepparent=False):
1775 def graft(repo, ctx, pctx, labels, keepparent=False):
1760 """Do a graft-like merge.
1776 """Do a graft-like merge.
1761
1777
1762 This is a merge where the merge ancestor is chosen such that one
1778 This is a merge where the merge ancestor is chosen such that one
1763 or more changesets are grafted onto the current changeset. In
1779 or more changesets are grafted onto the current changeset. In
1764 addition to the merge, this fixes up the dirstate to include only
1780 addition to the merge, this fixes up the dirstate to include only
1765 a single parent (if keepparent is False) and tries to duplicate any
1781 a single parent (if keepparent is False) and tries to duplicate any
1766 renames/copies appropriately.
1782 renames/copies appropriately.
1767
1783
1768 ctx - changeset to rebase
1784 ctx - changeset to rebase
1769 pctx - merge base, usually ctx.p1()
1785 pctx - merge base, usually ctx.p1()
1770 labels - merge labels eg ['local', 'graft']
1786 labels - merge labels eg ['local', 'graft']
1771 keepparent - keep second parent if any
1787 keepparent - keep second parent if any
1772
1788
1773 """
1789 """
1774 # If we're grafting a descendant onto an ancestor, be sure to pass
1790 # If we're grafting a descendant onto an ancestor, be sure to pass
1775 # mergeancestor=True to update. This does two things: 1) allows the merge if
1791 # mergeancestor=True to update. This does two things: 1) allows the merge if
1776 # the destination is the same as the parent of the ctx (so we can use graft
1792 # the destination is the same as the parent of the ctx (so we can use graft
1777 # to copy commits), and 2) informs update that the incoming changes are
1793 # to copy commits), and 2) informs update that the incoming changes are
1778 # newer than the destination so it doesn't prompt about "remote changed foo
1794 # newer than the destination so it doesn't prompt about "remote changed foo
1779 # which local deleted".
1795 # which local deleted".
1780 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1796 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1781
1797
1782 stats = update(repo, ctx.node(), True, True, pctx.node(),
1798 stats = update(repo, ctx.node(), True, True, pctx.node(),
1783 mergeancestor=mergeancestor, labels=labels)
1799 mergeancestor=mergeancestor, labels=labels)
1784
1800
1785 pother = nullid
1801 pother = nullid
1786 parents = ctx.parents()
1802 parents = ctx.parents()
1787 if keepparent and len(parents) == 2 and pctx in parents:
1803 if keepparent and len(parents) == 2 and pctx in parents:
1788 parents.remove(pctx)
1804 parents.remove(pctx)
1789 pother = parents[0].node()
1805 pother = parents[0].node()
1790
1806
1791 with repo.dirstate.parentchange():
1807 with repo.dirstate.parentchange():
1792 repo.setparents(repo['.'].node(), pother)
1808 repo.setparents(repo['.'].node(), pother)
1793 repo.dirstate.write(repo.currenttransaction())
1809 repo.dirstate.write(repo.currenttransaction())
1794 # fix up dirstate for copies and renames
1810 # fix up dirstate for copies and renames
1795 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1811 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1796 return stats
1812 return stats
@@ -1,704 +1,705 b''
1 # sparse.py - functionality for sparse checkouts
1 # sparse.py - functionality for sparse checkouts
2 #
2 #
3 # Copyright 2014 Facebook, Inc.
3 # Copyright 2014 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12 import os
12 import os
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullid
15 from .node import nullid
16 from . import (
16 from . import (
17 error,
17 error,
18 match as matchmod,
18 match as matchmod,
19 merge as mergemod,
19 merge as mergemod,
20 pathutil,
20 pathutil,
21 pycompat,
21 pycompat,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25
25
26 # Whether sparse features are enabled. This variable is intended to be
26 # Whether sparse features are enabled. This variable is intended to be
27 # temporary to facilitate porting sparse to core. It should eventually be
27 # temporary to facilitate porting sparse to core. It should eventually be
28 # a per-repo option, possibly a repo requirement.
28 # a per-repo option, possibly a repo requirement.
29 enabled = False
29 enabled = False
30
30
def parseconfig(ui, raw):
    """Parse the content of a sparse config file.

    Returns a tuple of (includes, excludes, profiles) sets.
    """
    includes = set()
    excludes = set()
    profiles = set()
    # the section currently being filled (the includes or excludes set)
    target = None
    insection = False

    for rawline in raw.split('\n'):
        stripped = rawline.strip()
        # blank lines and comments carry no information
        if not stripped or stripped.startswith('#'):
            continue
        if stripped.startswith('%include '):
            profile = stripped[9:].strip()
            if profile:
                profiles.add(profile)
            continue
        if stripped == '[include]':
            # note: set equality here, so an *empty* [exclude] section
            # followed by [include] is still accepted
            if insection and target != includes:
                # TODO pass filename into this API so we can report it.
                raise error.Abort(_('sparse config cannot have includes ' +
                                    'after excludes'))
            insection = True
            target = includes
            continue
        if stripped == '[exclude]':
            insection = True
            target = excludes
            continue
        # anything else is a pattern line and must live inside a section
        if target is None:
            raise error.Abort(_('sparse config entry outside of '
                                'section: %s') % stripped,
                              hint=_('add an [include] or [exclude] line '
                                     'to declare the entry type'))
        if stripped.startswith('/'):
            ui.warn(_('warning: sparse profile cannot use' +
                      ' paths starting with /, ignoring %s\n') % stripped)
            continue
        target.add(stripped)

    return includes, excludes, profiles
76
76
# Kept as a standalone function so extensions can monkeypatch it.
def readprofile(repo, profile, changeid):
    """Return the raw content of sparse profile ``profile`` at ``changeid``."""
    # TODO: cache the result; the filectx lookup incurs a manifest
    # resolve and can be slow.
    fctx = repo.filectx(profile, changeid=changeid)
    return fctx.data()
83
83
def patternsforrev(repo, rev):
    """Obtain sparse checkout patterns for the given rev.

    Returns a tuple of iterables representing includes, excludes, and
    patterns.
    """
    # Feature isn't enabled. No-op.
    if not enabled:
        return set(), set(), set()

    raw = repo.vfs.tryread('sparse')
    if not raw:
        return set(), set(), set()

    if rev is None:
        raise error.Abort(_('cannot parse sparse patterns from working '
                            'directory'))

    includes, excludes, profiles = parseconfig(repo.ui, raw)
    ctx = repo[rev]

    if profiles:
        # Profiles may reference other profiles via %include; resolve the
        # whole set transitively, using ``visited`` to break cycles.
        visited = set()
        while profiles:
            profile = profiles.pop()
            if profile in visited:
                continue

            visited.add(profile)

            try:
                raw = readprofile(repo, profile, rev)
            except error.ManifestLookupError:
                # a referenced profile file doesn't exist in this rev;
                # warn or debug-log depending on config, then skip it
                msg = (
                    "warning: sparse profile '%s' not found "
                    "in rev %s - ignoring it\n" % (profile, ctx))
                # experimental config: sparse.missingwarning
                if repo.ui.configbool(
                        'sparse', 'missingwarning'):
                    repo.ui.warn(msg)
                else:
                    repo.ui.debug(msg)
                continue

            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw)
            includes.update(pincludes)
            excludes.update(pexcludes)
            profiles.update(subprofs)

        # return the fully-resolved set of profiles, not the (now empty)
        # work queue
        profiles = visited

    if includes:
        # .hg* is always included — presumably so repo administrative
        # files survive the sparse filtering; confirm before changing
        includes.add('.hg*')

    return includes, excludes, profiles
139
139
140 def activeconfig(repo):
140 def activeconfig(repo):
141 """Determine the active sparse config rules.
141 """Determine the active sparse config rules.
142
142
143 Rules are constructed by reading the current sparse config and bringing in
143 Rules are constructed by reading the current sparse config and bringing in
144 referenced profiles from parents of the working directory.
144 referenced profiles from parents of the working directory.
145 """
145 """
146 revs = [repo.changelog.rev(node) for node in
146 revs = [repo.changelog.rev(node) for node in
147 repo.dirstate.parents() if node != nullid]
147 repo.dirstate.parents() if node != nullid]
148
148
149 allincludes = set()
149 allincludes = set()
150 allexcludes = set()
150 allexcludes = set()
151 allprofiles = set()
151 allprofiles = set()
152
152
153 for rev in revs:
153 for rev in revs:
154 includes, excludes, profiles = patternsforrev(repo, rev)
154 includes, excludes, profiles = patternsforrev(repo, rev)
155 allincludes |= includes
155 allincludes |= includes
156 allexcludes |= excludes
156 allexcludes |= excludes
157 allprofiles |= profiles
157 allprofiles |= profiles
158
158
159 return allincludes, allexcludes, allprofiles
159 return allincludes, allexcludes, allprofiles
160
160
161 def configsignature(repo, includetemp=True):
161 def configsignature(repo, includetemp=True):
162 """Obtain the signature string for the current sparse configuration.
162 """Obtain the signature string for the current sparse configuration.
163
163
164 This is used to construct a cache key for matchers.
164 This is used to construct a cache key for matchers.
165 """
165 """
166 cache = repo._sparsesignaturecache
166 cache = repo._sparsesignaturecache
167
167
168 signature = cache.get('signature')
168 signature = cache.get('signature')
169
169
170 if includetemp:
170 if includetemp:
171 tempsignature = cache.get('tempsignature')
171 tempsignature = cache.get('tempsignature')
172 else:
172 else:
173 tempsignature = '0'
173 tempsignature = '0'
174
174
175 if signature is None or (includetemp and tempsignature is None):
175 if signature is None or (includetemp and tempsignature is None):
176 signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest()
176 signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest()
177 cache['signature'] = signature
177 cache['signature'] = signature
178
178
179 if includetemp:
179 if includetemp:
180 raw = repo.vfs.tryread('tempsparse')
180 raw = repo.vfs.tryread('tempsparse')
181 tempsignature = hashlib.sha1(raw).hexdigest()
181 tempsignature = hashlib.sha1(raw).hexdigest()
182 cache['tempsignature'] = tempsignature
182 cache['tempsignature'] = tempsignature
183
183
184 return '%s %s' % (signature, tempsignature)
184 return '%s %s' % (signature, tempsignature)
185
185
186 def writeconfig(repo, includes, excludes, profiles):
186 def writeconfig(repo, includes, excludes, profiles):
187 """Write the sparse config file given a sparse configuration."""
187 """Write the sparse config file given a sparse configuration."""
188 with repo.vfs('sparse', 'wb') as fh:
188 with repo.vfs('sparse', 'wb') as fh:
189 for p in sorted(profiles):
189 for p in sorted(profiles):
190 fh.write('%%include %s\n' % p)
190 fh.write('%%include %s\n' % p)
191
191
192 if includes:
192 if includes:
193 fh.write('[include]\n')
193 fh.write('[include]\n')
194 for i in sorted(includes):
194 for i in sorted(includes):
195 fh.write(i)
195 fh.write(i)
196 fh.write('\n')
196 fh.write('\n')
197
197
198 if excludes:
198 if excludes:
199 fh.write('[exclude]\n')
199 fh.write('[exclude]\n')
200 for e in sorted(excludes):
200 for e in sorted(excludes):
201 fh.write(e)
201 fh.write(e)
202 fh.write('\n')
202 fh.write('\n')
203
203
204 repo._sparsesignaturecache.clear()
204 repo._sparsesignaturecache.clear()
205
205
206 def readtemporaryincludes(repo):
206 def readtemporaryincludes(repo):
207 raw = repo.vfs.tryread('tempsparse')
207 raw = repo.vfs.tryread('tempsparse')
208 if not raw:
208 if not raw:
209 return set()
209 return set()
210
210
211 return set(raw.split('\n'))
211 return set(raw.split('\n'))
212
212
213 def writetemporaryincludes(repo, includes):
213 def writetemporaryincludes(repo, includes):
214 repo.vfs.write('tempsparse', '\n'.join(sorted(includes)))
214 repo.vfs.write('tempsparse', '\n'.join(sorted(includes)))
215 repo._sparsesignaturecache.clear()
215 repo._sparsesignaturecache.clear()
216
216
217 def addtemporaryincludes(repo, additional):
217 def addtemporaryincludes(repo, additional):
218 includes = readtemporaryincludes(repo)
218 includes = readtemporaryincludes(repo)
219 for i in additional:
219 for i in additional:
220 includes.add(i)
220 includes.add(i)
221 writetemporaryincludes(repo, includes)
221 writetemporaryincludes(repo, includes)
222
222
223 def prunetemporaryincludes(repo):
223 def prunetemporaryincludes(repo):
224 if not enabled or not repo.vfs.exists('tempsparse'):
224 if not enabled or not repo.vfs.exists('tempsparse'):
225 return
225 return
226
226
227 s = repo.status()
227 s = repo.status()
228 if s.modified or s.added or s.removed or s.deleted:
228 if s.modified or s.added or s.removed or s.deleted:
229 # Still have pending changes. Don't bother trying to prune.
229 # Still have pending changes. Don't bother trying to prune.
230 return
230 return
231
231
232 sparsematch = matcher(repo, includetemp=False)
232 sparsematch = matcher(repo, includetemp=False)
233 dirstate = repo.dirstate
233 dirstate = repo.dirstate
234 actions = []
234 actions = []
235 dropped = []
235 dropped = []
236 tempincludes = readtemporaryincludes(repo)
236 tempincludes = readtemporaryincludes(repo)
237 for file in tempincludes:
237 for file in tempincludes:
238 if file in dirstate and not sparsematch(file):
238 if file in dirstate and not sparsematch(file):
239 message = _('dropping temporarily included sparse files')
239 message = _('dropping temporarily included sparse files')
240 actions.append((file, None, message))
240 actions.append((file, None, message))
241 dropped.append(file)
241 dropped.append(file)
242
242
243 typeactions = collections.defaultdict(list)
243 typeactions = collections.defaultdict(list)
244 typeactions['r'] = actions
244 typeactions['r'] = actions
245 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
245 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
246
246
247 # Fix dirstate
247 # Fix dirstate
248 for file in dropped:
248 for file in dropped:
249 dirstate.drop(file)
249 dirstate.drop(file)
250
250
251 repo.vfs.unlink('tempsparse')
251 repo.vfs.unlink('tempsparse')
252 repo._sparsesignaturecache.clear()
252 repo._sparsesignaturecache.clear()
253 msg = _('cleaned up %d temporarily added file(s) from the '
253 msg = _('cleaned up %d temporarily added file(s) from the '
254 'sparse checkout\n')
254 'sparse checkout\n')
255 repo.ui.status(msg % len(tempincludes))
255 repo.ui.status(msg % len(tempincludes))
256
256
257 def forceincludematcher(matcher, includes):
257 def forceincludematcher(matcher, includes):
258 """Returns a matcher that returns true for any of the forced includes
258 """Returns a matcher that returns true for any of the forced includes
259 before testing against the actual matcher."""
259 before testing against the actual matcher."""
260 kindpats = [('path', include, '') for include in includes]
260 kindpats = [('path', include, '') for include in includes]
261 includematcher = matchmod.includematcher('', '', kindpats)
261 includematcher = matchmod.includematcher('', '', kindpats)
262 return matchmod.unionmatcher([includematcher, matcher])
262 return matchmod.unionmatcher([includematcher, matcher])
263
263
264 def matcher(repo, revs=None, includetemp=True):
264 def matcher(repo, revs=None, includetemp=True):
265 """Obtain a matcher for sparse working directories for the given revs.
265 """Obtain a matcher for sparse working directories for the given revs.
266
266
267 If multiple revisions are specified, the matcher is the union of all
267 If multiple revisions are specified, the matcher is the union of all
268 revs.
268 revs.
269
269
270 ``includetemp`` indicates whether to use the temporary sparse profile.
270 ``includetemp`` indicates whether to use the temporary sparse profile.
271 """
271 """
272 # If sparse isn't enabled, sparse matcher matches everything.
272 # If sparse isn't enabled, sparse matcher matches everything.
273 if not enabled:
273 if not enabled:
274 return matchmod.always(repo.root, '')
274 return matchmod.always(repo.root, '')
275
275
276 if not revs or revs == [None]:
276 if not revs or revs == [None]:
277 revs = [repo.changelog.rev(node)
277 revs = [repo.changelog.rev(node)
278 for node in repo.dirstate.parents() if node != nullid]
278 for node in repo.dirstate.parents() if node != nullid]
279
279
280 signature = configsignature(repo, includetemp=includetemp)
280 signature = configsignature(repo, includetemp=includetemp)
281
281
282 key = '%s %s' % (signature, ' '.join(map(pycompat.bytestr, revs)))
282 key = '%s %s' % (signature, ' '.join(map(pycompat.bytestr, revs)))
283
283
284 result = repo._sparsematchercache.get(key)
284 result = repo._sparsematchercache.get(key)
285 if result:
285 if result:
286 return result
286 return result
287
287
288 matchers = []
288 matchers = []
289 for rev in revs:
289 for rev in revs:
290 try:
290 try:
291 includes, excludes, profiles = patternsforrev(repo, rev)
291 includes, excludes, profiles = patternsforrev(repo, rev)
292
292
293 if includes or excludes:
293 if includes or excludes:
294 # Explicitly include subdirectories of includes so
294 # Explicitly include subdirectories of includes so
295 # status will walk them down to the actual include.
295 # status will walk them down to the actual include.
296 subdirs = set()
296 subdirs = set()
297 for include in includes:
297 for include in includes:
298 # TODO consider using posix path functions here so Windows
298 # TODO consider using posix path functions here so Windows
299 # \ directory separators don't come into play.
299 # \ directory separators don't come into play.
300 dirname = os.path.dirname(include)
300 dirname = os.path.dirname(include)
301 # basename is used to avoid issues with absolute
301 # basename is used to avoid issues with absolute
302 # paths (which on Windows can include the drive).
302 # paths (which on Windows can include the drive).
303 while os.path.basename(dirname):
303 while os.path.basename(dirname):
304 subdirs.add(dirname)
304 subdirs.add(dirname)
305 dirname = os.path.dirname(dirname)
305 dirname = os.path.dirname(dirname)
306
306
307 matcher = matchmod.match(repo.root, '', [],
307 matcher = matchmod.match(repo.root, '', [],
308 include=includes, exclude=excludes,
308 include=includes, exclude=excludes,
309 default='relpath')
309 default='relpath')
310 if subdirs:
310 if subdirs:
311 matcher = forceincludematcher(matcher, subdirs)
311 matcher = forceincludematcher(matcher, subdirs)
312 matchers.append(matcher)
312 matchers.append(matcher)
313 except IOError:
313 except IOError:
314 pass
314 pass
315
315
316 if not matchers:
316 if not matchers:
317 result = matchmod.always(repo.root, '')
317 result = matchmod.always(repo.root, '')
318 elif len(matchers) == 1:
318 elif len(matchers) == 1:
319 result = matchers[0]
319 result = matchers[0]
320 else:
320 else:
321 result = matchmod.unionmatcher(matchers)
321 result = matchmod.unionmatcher(matchers)
322
322
323 if includetemp:
323 if includetemp:
324 tempincludes = readtemporaryincludes(repo)
324 tempincludes = readtemporaryincludes(repo)
325 result = forceincludematcher(result, tempincludes)
325 result = forceincludematcher(result, tempincludes)
326
326
327 repo._sparsematchercache[key] = result
327 repo._sparsematchercache[key] = result
328
328
329 return result
329 return result
330
330
331 def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
331 def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
332 """Filter updates to only lay out files that match the sparse rules."""
332 """Filter updates to only lay out files that match the sparse rules."""
333 if not enabled:
333 if not enabled:
334 return actions
334 return actions
335
335
336 oldrevs = [pctx.rev() for pctx in wctx.parents()]
336 oldrevs = [pctx.rev() for pctx in wctx.parents()]
337 oldsparsematch = matcher(repo, oldrevs)
337 oldsparsematch = matcher(repo, oldrevs)
338
338
339 if oldsparsematch.always():
339 if oldsparsematch.always():
340 return actions
340 return actions
341
341
342 files = set()
342 files = set()
343 prunedactions = {}
343 prunedactions = {}
344
344
345 if branchmerge:
345 if branchmerge:
346 # If we're merging, use the wctx filter, since we're merging into
346 # If we're merging, use the wctx filter, since we're merging into
347 # the wctx.
347 # the wctx.
348 sparsematch = matcher(repo, [wctx.parents()[0].rev()])
348 sparsematch = matcher(repo, [wctx.parents()[0].rev()])
349 else:
349 else:
350 # If we're updating, use the target context's filter, since we're
350 # If we're updating, use the target context's filter, since we're
351 # moving to the target context.
351 # moving to the target context.
352 sparsematch = matcher(repo, [mctx.rev()])
352 sparsematch = matcher(repo, [mctx.rev()])
353
353
354 temporaryfiles = []
354 temporaryfiles = []
355 for file, action in actions.iteritems():
355 for file, action in actions.iteritems():
356 type, args, msg = action
356 type, args, msg = action
357 files.add(file)
357 files.add(file)
358 if sparsematch(file):
358 if sparsematch(file):
359 prunedactions[file] = action
359 prunedactions[file] = action
360 elif type == 'm':
360 elif type == 'm':
361 temporaryfiles.append(file)
361 temporaryfiles.append(file)
362 prunedactions[file] = action
362 prunedactions[file] = action
363 elif branchmerge:
363 elif branchmerge:
364 if type != 'k':
364 if type != 'k':
365 temporaryfiles.append(file)
365 temporaryfiles.append(file)
366 prunedactions[file] = action
366 prunedactions[file] = action
367 elif type == 'f':
367 elif type == 'f':
368 prunedactions[file] = action
368 prunedactions[file] = action
369 elif file in wctx:
369 elif file in wctx:
370 prunedactions[file] = ('r', args, msg)
370 prunedactions[file] = ('r', args, msg)
371
371
372 if len(temporaryfiles) > 0:
372 if len(temporaryfiles) > 0:
373 repo.ui.status(_('temporarily included %d file(s) in the sparse '
373 repo.ui.status(_('temporarily included %d file(s) in the sparse '
374 'checkout for merging\n') % len(temporaryfiles))
374 'checkout for merging\n') % len(temporaryfiles))
375 addtemporaryincludes(repo, temporaryfiles)
375 addtemporaryincludes(repo, temporaryfiles)
376
376
377 # Add the new files to the working copy so they can be merged, etc
377 # Add the new files to the working copy so they can be merged, etc
378 actions = []
378 actions = []
379 message = 'temporarily adding to sparse checkout'
379 message = 'temporarily adding to sparse checkout'
380 wctxmanifest = repo[None].manifest()
380 wctxmanifest = repo[None].manifest()
381 for file in temporaryfiles:
381 for file in temporaryfiles:
382 if file in wctxmanifest:
382 if file in wctxmanifest:
383 fctx = repo[None][file]
383 fctx = repo[None][file]
384 actions.append((file, (fctx.flags(), False), message))
384 actions.append((file, (fctx.flags(), False), message))
385
385
386 typeactions = collections.defaultdict(list)
386 typeactions = collections.defaultdict(list)
387 typeactions['g'] = actions
387 typeactions['g'] = actions
388 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
388 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
389 False)
389 False)
390
390
391 dirstate = repo.dirstate
391 dirstate = repo.dirstate
392 for file, flags, msg in actions:
392 for file, flags, msg in actions:
393 dirstate.normal(file)
393 dirstate.normal(file)
394
394
395 profiles = activeconfig(repo)[2]
395 profiles = activeconfig(repo)[2]
396 changedprofiles = profiles & files
396 changedprofiles = profiles & files
397 # If an active profile changed during the update, refresh the checkout.
397 # If an active profile changed during the update, refresh the checkout.
398 # Don't do this during a branch merge, since all incoming changes should
398 # Don't do this during a branch merge, since all incoming changes should
399 # have been handled by the temporary includes above.
399 # have been handled by the temporary includes above.
400 if changedprofiles and not branchmerge:
400 if changedprofiles and not branchmerge:
401 mf = mctx.manifest()
401 mf = mctx.manifest()
402 for file in mf:
402 for file in mf:
403 old = oldsparsematch(file)
403 old = oldsparsematch(file)
404 new = sparsematch(file)
404 new = sparsematch(file)
405 if not old and new:
405 if not old and new:
406 flags = mf.flags(file)
406 flags = mf.flags(file)
407 prunedactions[file] = ('g', (flags, False), '')
407 prunedactions[file] = ('g', (flags, False), '')
408 elif old and not new:
408 elif old and not new:
409 prunedactions[file] = ('r', [], '')
409 prunedactions[file] = ('r', [], '')
410
410
411 return prunedactions
411 return prunedactions
412
412
413 def refreshwdir(repo, origstatus, origsparsematch, force=False):
413 def refreshwdir(repo, origstatus, origsparsematch, force=False):
414 """Refreshes working directory by taking sparse config into account.
414 """Refreshes working directory by taking sparse config into account.
415
415
416 The old status and sparse matcher is compared against the current sparse
416 The old status and sparse matcher is compared against the current sparse
417 matcher.
417 matcher.
418
418
419 Will abort if a file with pending changes is being excluded or included
419 Will abort if a file with pending changes is being excluded or included
420 unless ``force`` is True.
420 unless ``force`` is True.
421 """
421 """
422 # Verify there are no pending changes
422 # Verify there are no pending changes
423 pending = set()
423 pending = set()
424 pending.update(origstatus.modified)
424 pending.update(origstatus.modified)
425 pending.update(origstatus.added)
425 pending.update(origstatus.added)
426 pending.update(origstatus.removed)
426 pending.update(origstatus.removed)
427 sparsematch = matcher(repo)
427 sparsematch = matcher(repo)
428 abort = False
428 abort = False
429
429
430 for f in pending:
430 for f in pending:
431 if not sparsematch(f):
431 if not sparsematch(f):
432 repo.ui.warn(_("pending changes to '%s'\n") % f)
432 repo.ui.warn(_("pending changes to '%s'\n") % f)
433 abort = not force
433 abort = not force
434
434
435 if abort:
435 if abort:
436 raise error.Abort(_('could not update sparseness due to pending '
436 raise error.Abort(_('could not update sparseness due to pending '
437 'changes'))
437 'changes'))
438
438
439 # Calculate actions
439 # Calculate actions
440 dirstate = repo.dirstate
440 dirstate = repo.dirstate
441 ctx = repo['.']
441 ctx = repo['.']
442 added = []
442 added = []
443 lookup = []
443 lookup = []
444 dropped = []
444 dropped = []
445 mf = ctx.manifest()
445 mf = ctx.manifest()
446 files = set(mf)
446 files = set(mf)
447
447
448 actions = {}
448 actions = {}
449
449
450 for file in files:
450 for file in files:
451 old = origsparsematch(file)
451 old = origsparsematch(file)
452 new = sparsematch(file)
452 new = sparsematch(file)
453 # Add files that are newly included, or that don't exist in
453 # Add files that are newly included, or that don't exist in
454 # the dirstate yet.
454 # the dirstate yet.
455 if (new and not old) or (old and new and not file in dirstate):
455 if (new and not old) or (old and new and not file in dirstate):
456 fl = mf.flags(file)
456 fl = mf.flags(file)
457 if repo.wvfs.exists(file):
457 if repo.wvfs.exists(file):
458 actions[file] = ('e', (fl,), '')
458 actions[file] = ('e', (fl,), '')
459 lookup.append(file)
459 lookup.append(file)
460 else:
460 else:
461 actions[file] = ('g', (fl, False), '')
461 actions[file] = ('g', (fl, False), '')
462 added.append(file)
462 added.append(file)
463 # Drop files that are newly excluded, or that still exist in
463 # Drop files that are newly excluded, or that still exist in
464 # the dirstate.
464 # the dirstate.
465 elif (old and not new) or (not old and not new and file in dirstate):
465 elif (old and not new) or (not old and not new and file in dirstate):
466 dropped.append(file)
466 dropped.append(file)
467 if file not in pending:
467 if file not in pending:
468 actions[file] = ('r', [], '')
468 actions[file] = ('r', [], '')
469
469
470 # Verify there are no pending changes in newly included files
470 # Verify there are no pending changes in newly included files
471 abort = False
471 abort = False
472 for file in lookup:
472 for file in lookup:
473 repo.ui.warn(_("pending changes to '%s'\n") % file)
473 repo.ui.warn(_("pending changes to '%s'\n") % file)
474 abort = not force
474 abort = not force
475 if abort:
475 if abort:
476 raise error.Abort(_('cannot change sparseness due to pending '
476 raise error.Abort(_('cannot change sparseness due to pending '
477 'changes (delete the files or use '
477 'changes (delete the files or use '
478 '--force to bring them back dirty)'))
478 '--force to bring them back dirty)'))
479
479
480 # Check for files that were only in the dirstate.
480 # Check for files that were only in the dirstate.
481 for file, state in dirstate.iteritems():
481 for file, state in dirstate.iteritems():
482 if not file in files:
482 if not file in files:
483 old = origsparsematch(file)
483 old = origsparsematch(file)
484 new = sparsematch(file)
484 new = sparsematch(file)
485 if old and not new:
485 if old and not new:
486 dropped.append(file)
486 dropped.append(file)
487
487
488 # Apply changes to disk
488 # Apply changes to disk
489 typeactions = dict((m, []) for m in 'a f g am cd dc r dm dg m e k'.split())
489 typeactions = dict((m, [])
490 for m in 'a f g am cd dc r dm dg m e k p'.split())
490 for f, (m, args, msg) in actions.iteritems():
491 for f, (m, args, msg) in actions.iteritems():
491 if m not in typeactions:
492 if m not in typeactions:
492 typeactions[m] = []
493 typeactions[m] = []
493 typeactions[m].append((f, args, msg))
494 typeactions[m].append((f, args, msg))
494
495
495 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
496 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
496
497
497 # Fix dirstate
498 # Fix dirstate
498 for file in added:
499 for file in added:
499 dirstate.normal(file)
500 dirstate.normal(file)
500
501
501 for file in dropped:
502 for file in dropped:
502 dirstate.drop(file)
503 dirstate.drop(file)
503
504
504 for file in lookup:
505 for file in lookup:
505 # File exists on disk, and we're bringing it back in an unknown state.
506 # File exists on disk, and we're bringing it back in an unknown state.
506 dirstate.normallookup(file)
507 dirstate.normallookup(file)
507
508
508 return added, dropped, lookup
509 return added, dropped, lookup
509
510
510 def aftercommit(repo, node):
511 def aftercommit(repo, node):
511 """Perform actions after a working directory commit."""
512 """Perform actions after a working directory commit."""
512 # This function is called unconditionally, even if sparse isn't
513 # This function is called unconditionally, even if sparse isn't
513 # enabled.
514 # enabled.
514 ctx = repo[node]
515 ctx = repo[node]
515
516
516 profiles = patternsforrev(repo, ctx.rev())[2]
517 profiles = patternsforrev(repo, ctx.rev())[2]
517
518
518 # profiles will only have data if sparse is enabled.
519 # profiles will only have data if sparse is enabled.
519 if profiles & set(ctx.files()):
520 if profiles & set(ctx.files()):
520 origstatus = repo.status()
521 origstatus = repo.status()
521 origsparsematch = matcher(repo)
522 origsparsematch = matcher(repo)
522 refreshwdir(repo, origstatus, origsparsematch, force=True)
523 refreshwdir(repo, origstatus, origsparsematch, force=True)
523
524
524 prunetemporaryincludes(repo)
525 prunetemporaryincludes(repo)
525
526
526 def _updateconfigandrefreshwdir(repo, includes, excludes, profiles,
527 def _updateconfigandrefreshwdir(repo, includes, excludes, profiles,
527 force=False, removing=False):
528 force=False, removing=False):
528 """Update the sparse config and working directory state."""
529 """Update the sparse config and working directory state."""
529 raw = repo.vfs.tryread('sparse')
530 raw = repo.vfs.tryread('sparse')
530 oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw)
531 oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw)
531
532
532 oldstatus = repo.status()
533 oldstatus = repo.status()
533 oldmatch = matcher(repo)
534 oldmatch = matcher(repo)
534 oldrequires = set(repo.requirements)
535 oldrequires = set(repo.requirements)
535
536
536 # TODO remove this try..except once the matcher integrates better
537 # TODO remove this try..except once the matcher integrates better
537 # with dirstate. We currently have to write the updated config
538 # with dirstate. We currently have to write the updated config
538 # because that will invalidate the matcher cache and force a
539 # because that will invalidate the matcher cache and force a
539 # re-read. We ideally want to update the cached matcher on the
540 # re-read. We ideally want to update the cached matcher on the
540 # repo instance then flush the new config to disk once wdir is
541 # repo instance then flush the new config to disk once wdir is
541 # updated. But this requires massive rework to matcher() and its
542 # updated. But this requires massive rework to matcher() and its
542 # consumers.
543 # consumers.
543
544
544 if 'exp-sparse' in oldrequires and removing:
545 if 'exp-sparse' in oldrequires and removing:
545 repo.requirements.discard('exp-sparse')
546 repo.requirements.discard('exp-sparse')
546 scmutil.writerequires(repo.vfs, repo.requirements)
547 scmutil.writerequires(repo.vfs, repo.requirements)
547 elif 'exp-sparse' not in oldrequires:
548 elif 'exp-sparse' not in oldrequires:
548 repo.requirements.add('exp-sparse')
549 repo.requirements.add('exp-sparse')
549 scmutil.writerequires(repo.vfs, repo.requirements)
550 scmutil.writerequires(repo.vfs, repo.requirements)
550
551
551 try:
552 try:
552 writeconfig(repo, includes, excludes, profiles)
553 writeconfig(repo, includes, excludes, profiles)
553 return refreshwdir(repo, oldstatus, oldmatch, force=force)
554 return refreshwdir(repo, oldstatus, oldmatch, force=force)
554 except Exception:
555 except Exception:
555 if repo.requirements != oldrequires:
556 if repo.requirements != oldrequires:
556 repo.requirements.clear()
557 repo.requirements.clear()
557 repo.requirements |= oldrequires
558 repo.requirements |= oldrequires
558 scmutil.writerequires(repo.vfs, repo.requirements)
559 scmutil.writerequires(repo.vfs, repo.requirements)
559 writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
560 writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
560 raise
561 raise
561
562
562 def clearrules(repo, force=False):
563 def clearrules(repo, force=False):
563 """Clears include/exclude rules from the sparse config.
564 """Clears include/exclude rules from the sparse config.
564
565
565 The remaining sparse config only has profiles, if defined. The working
566 The remaining sparse config only has profiles, if defined. The working
566 directory is refreshed, as needed.
567 directory is refreshed, as needed.
567 """
568 """
568 with repo.wlock():
569 with repo.wlock():
569 raw = repo.vfs.tryread('sparse')
570 raw = repo.vfs.tryread('sparse')
570 includes, excludes, profiles = parseconfig(repo.ui, raw)
571 includes, excludes, profiles = parseconfig(repo.ui, raw)
571
572
572 if not includes and not excludes:
573 if not includes and not excludes:
573 return
574 return
574
575
575 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
576 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
576
577
577 def importfromfiles(repo, opts, paths, force=False):
578 def importfromfiles(repo, opts, paths, force=False):
578 """Import sparse config rules from files.
579 """Import sparse config rules from files.
579
580
580 The updated sparse config is written out and the working directory
581 The updated sparse config is written out and the working directory
581 is refreshed, as needed.
582 is refreshed, as needed.
582 """
583 """
583 with repo.wlock():
584 with repo.wlock():
584 # read current configuration
585 # read current configuration
585 raw = repo.vfs.tryread('sparse')
586 raw = repo.vfs.tryread('sparse')
586 includes, excludes, profiles = parseconfig(repo.ui, raw)
587 includes, excludes, profiles = parseconfig(repo.ui, raw)
587 aincludes, aexcludes, aprofiles = activeconfig(repo)
588 aincludes, aexcludes, aprofiles = activeconfig(repo)
588
589
589 # Import rules on top; only take in rules that are not yet
590 # Import rules on top; only take in rules that are not yet
590 # part of the active rules.
591 # part of the active rules.
591 changed = False
592 changed = False
592 for p in paths:
593 for p in paths:
593 with util.posixfile(util.expandpath(p)) as fh:
594 with util.posixfile(util.expandpath(p)) as fh:
594 raw = fh.read()
595 raw = fh.read()
595
596
596 iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw)
597 iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw)
597 oldsize = len(includes) + len(excludes) + len(profiles)
598 oldsize = len(includes) + len(excludes) + len(profiles)
598 includes.update(iincludes - aincludes)
599 includes.update(iincludes - aincludes)
599 excludes.update(iexcludes - aexcludes)
600 excludes.update(iexcludes - aexcludes)
600 profiles.update(iprofiles - aprofiles)
601 profiles.update(iprofiles - aprofiles)
601 if len(includes) + len(excludes) + len(profiles) > oldsize:
602 if len(includes) + len(excludes) + len(profiles) > oldsize:
602 changed = True
603 changed = True
603
604
604 profilecount = includecount = excludecount = 0
605 profilecount = includecount = excludecount = 0
605 fcounts = (0, 0, 0)
606 fcounts = (0, 0, 0)
606
607
607 if changed:
608 if changed:
608 profilecount = len(profiles - aprofiles)
609 profilecount = len(profiles - aprofiles)
609 includecount = len(includes - aincludes)
610 includecount = len(includes - aincludes)
610 excludecount = len(excludes - aexcludes)
611 excludecount = len(excludes - aexcludes)
611
612
612 fcounts = map(len, _updateconfigandrefreshwdir(
613 fcounts = map(len, _updateconfigandrefreshwdir(
613 repo, includes, excludes, profiles, force=force))
614 repo, includes, excludes, profiles, force=force))
614
615
615 printchanges(repo.ui, opts, profilecount, includecount, excludecount,
616 printchanges(repo.ui, opts, profilecount, includecount, excludecount,
616 *fcounts)
617 *fcounts)
617
618
def updateconfig(repo, pats, opts, include=False, exclude=False, reset=False,
                 delete=False, enableprofile=False, disableprofile=False,
                 force=False, usereporootpaths=False):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
    with repo.wlock():
        # Load the current sparse config; we diff against it at the end so
        # we can report net rule/profile changes.
        raw = repo.vfs.tryread('sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw)

        if reset:
            # Start from an empty config.
            newinclude, newexclude, newprofiles = set(), set(), set()
        else:
            # Start from copies of the existing config.
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_('paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    prefix = kind + ':' if kind else ''
                    abspats.append(
                        prefix + pathutil.canonpath(root, cwd, pat))
                else:
                    # Pattern kinds that are already repo-root relative pass
                    # through unchanged.
                    abspats.append(kindpat)
            pats = abspats

        # Apply exactly one of the requested actions to the new config.
        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        # Net change for each category: additions minus removals.
        profilecount = (len(newprofiles - oldprofiles) -
                        len(oldprofiles - newprofiles))
        includecount = (len(newinclude - oldinclude) -
                        len(oldinclude - newinclude))
        excludecount = (len(newexclude - oldexclude) -
                        len(oldexclude - newexclude))

        # Write the new config and refresh the working directory; the result
        # is the per-category file lists whose lengths we report.
        fcounts = map(len, _updateconfigandrefreshwdir(
            repo, newinclude, newexclude, newprofiles, force=force,
            removing=reset))

        printchanges(repo.ui, opts, profilecount, includecount,
                     excludecount, *fcounts)
682
683
def printchanges(ui, opts, profilecount=0, includecount=0, excludecount=0,
                 added=0, dropped=0, conflicting=0):
    """Print output summarizing sparse config changes."""
    with ui.formatter('sparse', opts) as fm:
        fm.startitem()

        # Rule/profile deltas are always reported (verbose only).
        for field, msg, count in (
            ('profiles_added', _('Profiles changed: %d\n'), profilecount),
            ('include_rules_added', _('Include rules changed: %d\n'),
             includecount),
            ('exclude_rules_added', _('Exclude rules changed: %d\n'),
             excludecount),
        ):
            fm.condwrite(ui.verbose, field, msg, count)

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            for field, msg, count in (
                ('files_added', _('Files added: %d\n'), added),
                ('files_dropped', _('Files dropped: %d\n'), dropped),
                ('files_conflicting', _('Files conflicting: %d\n'),
                 conflicting),
            ):
                fm.condwrite(ui.verbose, field, msg, count)
General Comments 0
You need to be logged in to leave comments. Login now