mergestate: define NO_OP_ACTION in module scope instead of inside mergeresult...
Pulkit Goyal
r46096:590a840f default
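The gist of the change: the NO_OP_ACTIONS tuple is removed from the mergeresult class in merge.py and defined once at module scope in the mergestate module, so call sites such as _filternarrowactions() and mergeresult.hasconflicts() now test membership against mergestatemod.NO_OP_ACTIONS instead of mergeresult.NO_OP_ACTIONS. A minimal sketch of that shape follows; the byte values are placeholder stand-ins for the real mergestate action constants, not Mercurial's definitions.

# Placeholder stand-ins for the mergestate action constants (illustrative only).
ACTION_KEEP = b'k'
ACTION_KEEP_ABSENT = b'ka'
ACTION_KEEP_NEW = b'kn'


# Before this change: the tuple was a class attribute of mergeresult in merge.py.
class mergeresult_before(object):
    NO_OP_ACTIONS = (ACTION_KEEP, ACTION_KEEP_ABSENT, ACTION_KEEP_NEW)


# After this change: a single module-level tuple (conceptually in mergestate.py)
# that callers reference as mergestatemod.NO_OP_ACTIONS.
NO_OP_ACTIONS = (ACTION_KEEP, ACTION_KEEP_ABSENT, ACTION_KEEP_NEW)


def is_no_op(action):
    # call sites now do membership tests against the module-level tuple
    # instead of looking it up on the mergeresult class
    return action in NO_OP_ACTIONS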
@@ -1,2242 +1,2236 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 modifiednodeid,
18 modifiednodeid,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .thirdparty import attr
22 from .thirdparty import attr
23 from . import (
23 from . import (
24 copies,
24 copies,
25 encoding,
25 encoding,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 match as matchmod,
28 match as matchmod,
29 mergestate as mergestatemod,
29 mergestate as mergestatemod,
30 obsutil,
30 obsutil,
31 pathutil,
31 pathutil,
32 pycompat,
32 pycompat,
33 scmutil,
33 scmutil,
34 subrepoutil,
34 subrepoutil,
35 util,
35 util,
36 worker,
36 worker,
37 )
37 )
38
38
39 _pack = struct.pack
39 _pack = struct.pack
40 _unpack = struct.unpack
40 _unpack = struct.unpack
41
41
42
42
43 def _getcheckunknownconfig(repo, section, name):
43 def _getcheckunknownconfig(repo, section, name):
44 config = repo.ui.config(section, name)
44 config = repo.ui.config(section, name)
45 valid = [b'abort', b'ignore', b'warn']
45 valid = [b'abort', b'ignore', b'warn']
46 if config not in valid:
46 if config not in valid:
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 raise error.ConfigError(
48 raise error.ConfigError(
49 _(b"%s.%s not valid ('%s' is none of %s)")
49 _(b"%s.%s not valid ('%s' is none of %s)")
50 % (section, name, config, validstr)
50 % (section, name, config, validstr)
51 )
51 )
52 return config
52 return config
53
53
54
54
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 if wctx.isinmemory():
56 if wctx.isinmemory():
57 # Nothing to do in IMM because nothing in the "working copy" can be an
57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 # unknown file.
58 # unknown file.
59 #
59 #
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 # because that function does other useful work.
61 # because that function does other useful work.
62 return False
62 return False
63
63
64 if f2 is None:
64 if f2 is None:
65 f2 = f
65 f2 = f
66 return (
66 return (
67 repo.wvfs.audit.check(f)
67 repo.wvfs.audit.check(f)
68 and repo.wvfs.isfileorlink(f)
68 and repo.wvfs.isfileorlink(f)
69 and repo.dirstate.normalize(f) not in repo.dirstate
69 and repo.dirstate.normalize(f) not in repo.dirstate
70 and mctx[f2].cmp(wctx[f])
70 and mctx[f2].cmp(wctx[f])
71 )
71 )
72
72
73
73
74 class _unknowndirschecker(object):
74 class _unknowndirschecker(object):
75 """
75 """
76 Look for any unknown files or directories that may have a path conflict
76 Look for any unknown files or directories that may have a path conflict
77 with a file. If any path prefix of the file exists as a file or link,
77 with a file. If any path prefix of the file exists as a file or link,
78 then it conflicts. If the file itself is a directory that contains any
78 then it conflicts. If the file itself is a directory that contains any
79 file that is not tracked, then it conflicts.
79 file that is not tracked, then it conflicts.
80
80
81 Returns the shortest path at which a conflict occurs, or None if there is
81 Returns the shortest path at which a conflict occurs, or None if there is
82 no conflict.
82 no conflict.
83 """
83 """
84
84
85 def __init__(self):
85 def __init__(self):
86 # A set of paths known to be good. This prevents repeated checking of
86 # A set of paths known to be good. This prevents repeated checking of
87 # dirs. It will be updated with any new dirs that are checked and found
87 # dirs. It will be updated with any new dirs that are checked and found
88 # to be safe.
88 # to be safe.
89 self._unknowndircache = set()
89 self._unknowndircache = set()
90
90
91 # A set of paths that are known to be absent. This prevents repeated
91 # A set of paths that are known to be absent. This prevents repeated
92 # checking of subdirectories that are known not to exist. It will be
92 # checking of subdirectories that are known not to exist. It will be
93 # updated with any new dirs that are checked and found to be absent.
93 # updated with any new dirs that are checked and found to be absent.
94 self._missingdircache = set()
94 self._missingdircache = set()
95
95
96 def __call__(self, repo, wctx, f):
96 def __call__(self, repo, wctx, f):
97 if wctx.isinmemory():
97 if wctx.isinmemory():
98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
99 return False
99 return False
100
100
101 # Check for path prefixes that exist as unknown files.
101 # Check for path prefixes that exist as unknown files.
102 for p in reversed(list(pathutil.finddirs(f))):
102 for p in reversed(list(pathutil.finddirs(f))):
103 if p in self._missingdircache:
103 if p in self._missingdircache:
104 return
104 return
105 if p in self._unknowndircache:
105 if p in self._unknowndircache:
106 continue
106 continue
107 if repo.wvfs.audit.check(p):
107 if repo.wvfs.audit.check(p):
108 if (
108 if (
109 repo.wvfs.isfileorlink(p)
109 repo.wvfs.isfileorlink(p)
110 and repo.dirstate.normalize(p) not in repo.dirstate
110 and repo.dirstate.normalize(p) not in repo.dirstate
111 ):
111 ):
112 return p
112 return p
113 if not repo.wvfs.lexists(p):
113 if not repo.wvfs.lexists(p):
114 self._missingdircache.add(p)
114 self._missingdircache.add(p)
115 return
115 return
116 self._unknowndircache.add(p)
116 self._unknowndircache.add(p)
117
117
118 # Check if the file conflicts with a directory containing unknown files.
118 # Check if the file conflicts with a directory containing unknown files.
119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
120 # Does the directory contain any files that are not in the dirstate?
120 # Does the directory contain any files that are not in the dirstate?
121 for p, dirs, files in repo.wvfs.walk(f):
121 for p, dirs, files in repo.wvfs.walk(f):
122 for fn in files:
122 for fn in files:
123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
124 relf = repo.dirstate.normalize(relf, isknown=True)
124 relf = repo.dirstate.normalize(relf, isknown=True)
125 if relf not in repo.dirstate:
125 if relf not in repo.dirstate:
126 return f
126 return f
127 return None
127 return None
128
128
129
129
130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
131 """
131 """
132 Considers any actions that care about the presence of conflicting unknown
132 Considers any actions that care about the presence of conflicting unknown
133 files. For some actions, the result is to abort; for others, it is to
133 files. For some actions, the result is to abort; for others, it is to
134 choose a different action.
134 choose a different action.
135 """
135 """
136 fileconflicts = set()
136 fileconflicts = set()
137 pathconflicts = set()
137 pathconflicts = set()
138 warnconflicts = set()
138 warnconflicts = set()
139 abortconflicts = set()
139 abortconflicts = set()
140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
142 pathconfig = repo.ui.configbool(
142 pathconfig = repo.ui.configbool(
143 b'experimental', b'merge.checkpathconflicts'
143 b'experimental', b'merge.checkpathconflicts'
144 )
144 )
145 if not force:
145 if not force:
146
146
147 def collectconflicts(conflicts, config):
147 def collectconflicts(conflicts, config):
148 if config == b'abort':
148 if config == b'abort':
149 abortconflicts.update(conflicts)
149 abortconflicts.update(conflicts)
150 elif config == b'warn':
150 elif config == b'warn':
151 warnconflicts.update(conflicts)
151 warnconflicts.update(conflicts)
152
152
153 checkunknowndirs = _unknowndirschecker()
153 checkunknowndirs = _unknowndirschecker()
154 for f in mresult.files(
154 for f in mresult.files(
155 (
155 (
156 mergestatemod.ACTION_CREATED,
156 mergestatemod.ACTION_CREATED,
157 mergestatemod.ACTION_DELETED_CHANGED,
157 mergestatemod.ACTION_DELETED_CHANGED,
158 )
158 )
159 ):
159 ):
160 if _checkunknownfile(repo, wctx, mctx, f):
160 if _checkunknownfile(repo, wctx, mctx, f):
161 fileconflicts.add(f)
161 fileconflicts.add(f)
162 elif pathconfig and f not in wctx:
162 elif pathconfig and f not in wctx:
163 path = checkunknowndirs(repo, wctx, f)
163 path = checkunknowndirs(repo, wctx, f)
164 if path is not None:
164 if path is not None:
165 pathconflicts.add(path)
165 pathconflicts.add(path)
166 for f, args, msg in mresult.getactions(
166 for f, args, msg in mresult.getactions(
167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
168 ):
168 ):
169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
170 fileconflicts.add(f)
170 fileconflicts.add(f)
171
171
172 allconflicts = fileconflicts | pathconflicts
172 allconflicts = fileconflicts | pathconflicts
173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
174 unknownconflicts = allconflicts - ignoredconflicts
174 unknownconflicts = allconflicts - ignoredconflicts
175 collectconflicts(ignoredconflicts, ignoredconfig)
175 collectconflicts(ignoredconflicts, ignoredconfig)
176 collectconflicts(unknownconflicts, unknownconfig)
176 collectconflicts(unknownconflicts, unknownconfig)
177 else:
177 else:
178 for f, args, msg in list(
178 for f, args, msg in list(
179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
180 ):
180 ):
181 fl2, anc = args
181 fl2, anc = args
182 different = _checkunknownfile(repo, wctx, mctx, f)
182 different = _checkunknownfile(repo, wctx, mctx, f)
183 if repo.dirstate._ignore(f):
183 if repo.dirstate._ignore(f):
184 config = ignoredconfig
184 config = ignoredconfig
185 else:
185 else:
186 config = unknownconfig
186 config = unknownconfig
187
187
188 # The behavior when force is True is described by this table:
188 # The behavior when force is True is described by this table:
189 # config different mergeforce | action backup
189 # config different mergeforce | action backup
190 # * n * | get n
190 # * n * | get n
191 # * y y | merge -
191 # * y y | merge -
192 # abort y n | merge - (1)
192 # abort y n | merge - (1)
193 # warn y n | warn + get y
193 # warn y n | warn + get y
194 # ignore y n | get y
194 # ignore y n | get y
195 #
195 #
196 # (1) this is probably the wrong behavior here -- we should
196 # (1) this is probably the wrong behavior here -- we should
197 # probably abort, but some actions like rebases currently
197 # probably abort, but some actions like rebases currently
198 # don't like an abort happening in the middle of
198 # don't like an abort happening in the middle of
199 # merge.update.
199 # merge.update.
200 if not different:
200 if not different:
201 mresult.addfile(
201 mresult.addfile(
202 f,
202 f,
203 mergestatemod.ACTION_GET,
203 mergestatemod.ACTION_GET,
204 (fl2, False),
204 (fl2, False),
205 b'remote created',
205 b'remote created',
206 )
206 )
207 elif mergeforce or config == b'abort':
207 elif mergeforce or config == b'abort':
208 mresult.addfile(
208 mresult.addfile(
209 f,
209 f,
210 mergestatemod.ACTION_MERGE,
210 mergestatemod.ACTION_MERGE,
211 (f, f, None, False, anc),
211 (f, f, None, False, anc),
212 b'remote differs from untracked local',
212 b'remote differs from untracked local',
213 )
213 )
214 elif config == b'abort':
214 elif config == b'abort':
215 abortconflicts.add(f)
215 abortconflicts.add(f)
216 else:
216 else:
217 if config == b'warn':
217 if config == b'warn':
218 warnconflicts.add(f)
218 warnconflicts.add(f)
219 mresult.addfile(
219 mresult.addfile(
220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
221 )
221 )
222
222
223 for f in sorted(abortconflicts):
223 for f in sorted(abortconflicts):
224 warn = repo.ui.warn
224 warn = repo.ui.warn
225 if f in pathconflicts:
225 if f in pathconflicts:
226 if repo.wvfs.isfileorlink(f):
226 if repo.wvfs.isfileorlink(f):
227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
228 else:
228 else:
229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
230 else:
230 else:
231 warn(_(b"%s: untracked file differs\n") % f)
231 warn(_(b"%s: untracked file differs\n") % f)
232 if abortconflicts:
232 if abortconflicts:
233 raise error.Abort(
233 raise error.Abort(
234 _(
234 _(
235 b"untracked files in working directory "
235 b"untracked files in working directory "
236 b"differ from files in requested revision"
236 b"differ from files in requested revision"
237 )
237 )
238 )
238 )
239
239
240 for f in sorted(warnconflicts):
240 for f in sorted(warnconflicts):
241 if repo.wvfs.isfileorlink(f):
241 if repo.wvfs.isfileorlink(f):
242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
243 else:
243 else:
244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
245
245
246 for f, args, msg in list(
246 for f, args, msg in list(
247 mresult.getactions([mergestatemod.ACTION_CREATED])
247 mresult.getactions([mergestatemod.ACTION_CREATED])
248 ):
248 ):
249 backup = (
249 backup = (
250 f in fileconflicts
250 f in fileconflicts
251 or f in pathconflicts
251 or f in pathconflicts
252 or any(p in pathconflicts for p in pathutil.finddirs(f))
252 or any(p in pathconflicts for p in pathutil.finddirs(f))
253 )
253 )
254 (flags,) = args
254 (flags,) = args
255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256
256
257
257
258 def _forgetremoved(wctx, mctx, branchmerge, mresult):
258 def _forgetremoved(wctx, mctx, branchmerge, mresult):
259 """
259 """
260 Forget removed files
260 Forget removed files
261
261
262 If we're jumping between revisions (as opposed to merging), and if
262 If we're jumping between revisions (as opposed to merging), and if
263 neither the working directory nor the target rev has the file,
263 neither the working directory nor the target rev has the file,
264 then we need to remove it from the dirstate, to prevent the
264 then we need to remove it from the dirstate, to prevent the
265 dirstate from listing the file when it is no longer in the
265 dirstate from listing the file when it is no longer in the
266 manifest.
266 manifest.
267
267
268 If we're merging, and the other revision has removed a file
268 If we're merging, and the other revision has removed a file
269 that is not present in the working directory, we need to mark it
269 that is not present in the working directory, we need to mark it
270 as removed.
270 as removed.
271 """
271 """
272
272
273 m = mergestatemod.ACTION_FORGET
273 m = mergestatemod.ACTION_FORGET
274 if branchmerge:
274 if branchmerge:
275 m = mergestatemod.ACTION_REMOVE
275 m = mergestatemod.ACTION_REMOVE
276 for f in wctx.deleted():
276 for f in wctx.deleted():
277 if f not in mctx:
277 if f not in mctx:
278 mresult.addfile(f, m, None, b"forget deleted")
278 mresult.addfile(f, m, None, b"forget deleted")
279
279
280 if not branchmerge:
280 if not branchmerge:
281 for f in wctx.removed():
281 for f in wctx.removed():
282 if f not in mctx:
282 if f not in mctx:
283 mresult.addfile(
283 mresult.addfile(
284 f, mergestatemod.ACTION_FORGET, None, b"forget removed",
284 f, mergestatemod.ACTION_FORGET, None, b"forget removed",
285 )
285 )
286
286
287
287
288 def _checkcollision(repo, wmf, mresult):
288 def _checkcollision(repo, wmf, mresult):
289 """
289 """
290 Check for case-folding collisions.
290 Check for case-folding collisions.
291 """
291 """
292 # If the repo is narrowed, filter out files outside the narrowspec.
292 # If the repo is narrowed, filter out files outside the narrowspec.
293 narrowmatch = repo.narrowmatch()
293 narrowmatch = repo.narrowmatch()
294 if not narrowmatch.always():
294 if not narrowmatch.always():
295 pmmf = set(wmf.walk(narrowmatch))
295 pmmf = set(wmf.walk(narrowmatch))
296 if mresult:
296 if mresult:
297 for f in list(mresult.files()):
297 for f in list(mresult.files()):
298 if not narrowmatch(f):
298 if not narrowmatch(f):
299 mresult.removefile(f)
299 mresult.removefile(f)
300 else:
300 else:
301 # build provisional merged manifest up
301 # build provisional merged manifest up
302 pmmf = set(wmf)
302 pmmf = set(wmf)
303
303
304 if mresult:
304 if mresult:
305 # KEEP and EXEC are no-op
305 # KEEP and EXEC are no-op
306 for f in mresult.files(
306 for f in mresult.files(
307 (
307 (
308 mergestatemod.ACTION_ADD,
308 mergestatemod.ACTION_ADD,
309 mergestatemod.ACTION_ADD_MODIFIED,
309 mergestatemod.ACTION_ADD_MODIFIED,
310 mergestatemod.ACTION_FORGET,
310 mergestatemod.ACTION_FORGET,
311 mergestatemod.ACTION_GET,
311 mergestatemod.ACTION_GET,
312 mergestatemod.ACTION_CHANGED_DELETED,
312 mergestatemod.ACTION_CHANGED_DELETED,
313 mergestatemod.ACTION_DELETED_CHANGED,
313 mergestatemod.ACTION_DELETED_CHANGED,
314 )
314 )
315 ):
315 ):
316 pmmf.add(f)
316 pmmf.add(f)
317 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
317 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
318 pmmf.discard(f)
318 pmmf.discard(f)
319 for f, args, msg in mresult.getactions(
319 for f, args, msg in mresult.getactions(
320 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
320 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
321 ):
321 ):
322 f2, flags = args
322 f2, flags = args
323 pmmf.discard(f2)
323 pmmf.discard(f2)
324 pmmf.add(f)
324 pmmf.add(f)
325 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
325 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
326 pmmf.add(f)
326 pmmf.add(f)
327 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
327 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
328 f1, f2, fa, move, anc = args
328 f1, f2, fa, move, anc = args
329 if move:
329 if move:
330 pmmf.discard(f1)
330 pmmf.discard(f1)
331 pmmf.add(f)
331 pmmf.add(f)
332
332
333 # check case-folding collision in provisional merged manifest
333 # check case-folding collision in provisional merged manifest
334 foldmap = {}
334 foldmap = {}
335 for f in pmmf:
335 for f in pmmf:
336 fold = util.normcase(f)
336 fold = util.normcase(f)
337 if fold in foldmap:
337 if fold in foldmap:
338 raise error.Abort(
338 raise error.Abort(
339 _(b"case-folding collision between %s and %s")
339 _(b"case-folding collision between %s and %s")
340 % (f, foldmap[fold])
340 % (f, foldmap[fold])
341 )
341 )
342 foldmap[fold] = f
342 foldmap[fold] = f
343
343
344 # check case-folding of directories
344 # check case-folding of directories
345 foldprefix = unfoldprefix = lastfull = b''
345 foldprefix = unfoldprefix = lastfull = b''
346 for fold, f in sorted(foldmap.items()):
346 for fold, f in sorted(foldmap.items()):
347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
348 # the folded prefix matches but actual casing is different
348 # the folded prefix matches but actual casing is different
349 raise error.Abort(
349 raise error.Abort(
350 _(b"case-folding collision between %s and directory of %s")
350 _(b"case-folding collision between %s and directory of %s")
351 % (lastfull, f)
351 % (lastfull, f)
352 )
352 )
353 foldprefix = fold + b'/'
353 foldprefix = fold + b'/'
354 unfoldprefix = f + b'/'
354 unfoldprefix = f + b'/'
355 lastfull = f
355 lastfull = f
356
356
357
357
358 def _filesindirs(repo, manifest, dirs):
358 def _filesindirs(repo, manifest, dirs):
359 """
359 """
360 Generator that yields pairs of all the files in the manifest that are found
360 Generator that yields pairs of all the files in the manifest that are found
361 inside the directories listed in dirs, and which directory they are found
361 inside the directories listed in dirs, and which directory they are found
362 in.
362 in.
363 """
363 """
364 for f in manifest:
364 for f in manifest:
365 for p in pathutil.finddirs(f):
365 for p in pathutil.finddirs(f):
366 if p in dirs:
366 if p in dirs:
367 yield f, p
367 yield f, p
368 break
368 break
369
369
370
370
371 def checkpathconflicts(repo, wctx, mctx, mresult):
371 def checkpathconflicts(repo, wctx, mctx, mresult):
372 """
372 """
373 Check if any actions introduce path conflicts in the repository, updating
373 Check if any actions introduce path conflicts in the repository, updating
374 actions to record or handle the path conflict accordingly.
374 actions to record or handle the path conflict accordingly.
375 """
375 """
376 mf = wctx.manifest()
376 mf = wctx.manifest()
377
377
378 # The set of local files that conflict with a remote directory.
378 # The set of local files that conflict with a remote directory.
379 localconflicts = set()
379 localconflicts = set()
380
380
381 # The set of directories that conflict with a remote file, and so may cause
381 # The set of directories that conflict with a remote file, and so may cause
382 # conflicts if they still contain any files after the merge.
382 # conflicts if they still contain any files after the merge.
383 remoteconflicts = set()
383 remoteconflicts = set()
384
384
385 # The set of directories that appear as both a file and a directory in the
385 # The set of directories that appear as both a file and a directory in the
386 # remote manifest. These indicate an invalid remote manifest, which
386 # remote manifest. These indicate an invalid remote manifest, which
387 # can't be updated to cleanly.
387 # can't be updated to cleanly.
388 invalidconflicts = set()
388 invalidconflicts = set()
389
389
390 # The set of directories that contain files that are being created.
390 # The set of directories that contain files that are being created.
391 createdfiledirs = set()
391 createdfiledirs = set()
392
392
393 # The set of files deleted by all the actions.
393 # The set of files deleted by all the actions.
394 deletedfiles = set()
394 deletedfiles = set()
395
395
396 for f in mresult.files(
396 for f in mresult.files(
397 (
397 (
398 mergestatemod.ACTION_CREATED,
398 mergestatemod.ACTION_CREATED,
399 mergestatemod.ACTION_DELETED_CHANGED,
399 mergestatemod.ACTION_DELETED_CHANGED,
400 mergestatemod.ACTION_MERGE,
400 mergestatemod.ACTION_MERGE,
401 mergestatemod.ACTION_CREATED_MERGE,
401 mergestatemod.ACTION_CREATED_MERGE,
402 )
402 )
403 ):
403 ):
404 # This action may create a new local file.
404 # This action may create a new local file.
405 createdfiledirs.update(pathutil.finddirs(f))
405 createdfiledirs.update(pathutil.finddirs(f))
406 if mf.hasdir(f):
406 if mf.hasdir(f):
407 # The file aliases a local directory. This might be ok if all
407 # The file aliases a local directory. This might be ok if all
408 # the files in the local directory are being deleted. This
408 # the files in the local directory are being deleted. This
409 # will be checked once we know what all the deleted files are.
409 # will be checked once we know what all the deleted files are.
410 remoteconflicts.add(f)
410 remoteconflicts.add(f)
411 # Track the names of all deleted files.
411 # Track the names of all deleted files.
412 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
412 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
413 deletedfiles.add(f)
413 deletedfiles.add(f)
414 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
414 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
415 f1, f2, fa, move, anc = args
415 f1, f2, fa, move, anc = args
416 if move:
416 if move:
417 deletedfiles.add(f1)
417 deletedfiles.add(f1)
418 for (f, args, msg) in mresult.getactions(
418 for (f, args, msg) in mresult.getactions(
419 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
419 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
420 ):
420 ):
421 f2, flags = args
421 f2, flags = args
422 deletedfiles.add(f2)
422 deletedfiles.add(f2)
423
423
424 # Check all directories that contain created files for path conflicts.
424 # Check all directories that contain created files for path conflicts.
425 for p in createdfiledirs:
425 for p in createdfiledirs:
426 if p in mf:
426 if p in mf:
427 if p in mctx:
427 if p in mctx:
428 # A file is in a directory which aliases both a local
428 # A file is in a directory which aliases both a local
429 # and a remote file. This is an internal inconsistency
429 # and a remote file. This is an internal inconsistency
430 # within the remote manifest.
430 # within the remote manifest.
431 invalidconflicts.add(p)
431 invalidconflicts.add(p)
432 else:
432 else:
433 # A file is in a directory which aliases a local file.
433 # A file is in a directory which aliases a local file.
434 # We will need to rename the local file.
434 # We will need to rename the local file.
435 localconflicts.add(p)
435 localconflicts.add(p)
436 pd = mresult.getfile(p)
436 pd = mresult.getfile(p)
437 if pd and pd[0] in (
437 if pd and pd[0] in (
438 mergestatemod.ACTION_CREATED,
438 mergestatemod.ACTION_CREATED,
439 mergestatemod.ACTION_DELETED_CHANGED,
439 mergestatemod.ACTION_DELETED_CHANGED,
440 mergestatemod.ACTION_MERGE,
440 mergestatemod.ACTION_MERGE,
441 mergestatemod.ACTION_CREATED_MERGE,
441 mergestatemod.ACTION_CREATED_MERGE,
442 ):
442 ):
443 # The file is in a directory which aliases a remote file.
443 # The file is in a directory which aliases a remote file.
444 # This is an internal inconsistency within the remote
444 # This is an internal inconsistency within the remote
445 # manifest.
445 # manifest.
446 invalidconflicts.add(p)
446 invalidconflicts.add(p)
447
447
448 # Rename all local conflicting files that have not been deleted.
448 # Rename all local conflicting files that have not been deleted.
449 for p in localconflicts:
449 for p in localconflicts:
450 if p not in deletedfiles:
450 if p not in deletedfiles:
451 ctxname = bytes(wctx).rstrip(b'+')
451 ctxname = bytes(wctx).rstrip(b'+')
452 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
452 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
453 porig = wctx[p].copysource() or p
453 porig = wctx[p].copysource() or p
454 mresult.addfile(
454 mresult.addfile(
455 pnew,
455 pnew,
456 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
456 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
457 (p, porig),
457 (p, porig),
458 b'local path conflict',
458 b'local path conflict',
459 )
459 )
460 mresult.addfile(
460 mresult.addfile(
461 p,
461 p,
462 mergestatemod.ACTION_PATH_CONFLICT,
462 mergestatemod.ACTION_PATH_CONFLICT,
463 (pnew, b'l'),
463 (pnew, b'l'),
464 b'path conflict',
464 b'path conflict',
465 )
465 )
466
466
467 if remoteconflicts:
467 if remoteconflicts:
468 # Check if all files in the conflicting directories have been removed.
468 # Check if all files in the conflicting directories have been removed.
469 ctxname = bytes(mctx).rstrip(b'+')
469 ctxname = bytes(mctx).rstrip(b'+')
470 for f, p in _filesindirs(repo, mf, remoteconflicts):
470 for f, p in _filesindirs(repo, mf, remoteconflicts):
471 if f not in deletedfiles:
471 if f not in deletedfiles:
472 m, args, msg = mresult.getfile(p)
472 m, args, msg = mresult.getfile(p)
473 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
473 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
474 if m in (
474 if m in (
475 mergestatemod.ACTION_DELETED_CHANGED,
475 mergestatemod.ACTION_DELETED_CHANGED,
476 mergestatemod.ACTION_MERGE,
476 mergestatemod.ACTION_MERGE,
477 ):
477 ):
478 # Action was merge, just update target.
478 # Action was merge, just update target.
479 mresult.addfile(pnew, m, args, msg)
479 mresult.addfile(pnew, m, args, msg)
480 else:
480 else:
481 # Action was create, change to renamed get action.
481 # Action was create, change to renamed get action.
482 fl = args[0]
482 fl = args[0]
483 mresult.addfile(
483 mresult.addfile(
484 pnew,
484 pnew,
485 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
485 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
486 (p, fl),
486 (p, fl),
487 b'remote path conflict',
487 b'remote path conflict',
488 )
488 )
489 mresult.addfile(
489 mresult.addfile(
490 p,
490 p,
491 mergestatemod.ACTION_PATH_CONFLICT,
491 mergestatemod.ACTION_PATH_CONFLICT,
492 (pnew, mergestatemod.ACTION_REMOVE),
492 (pnew, mergestatemod.ACTION_REMOVE),
493 b'path conflict',
493 b'path conflict',
494 )
494 )
495 remoteconflicts.remove(p)
495 remoteconflicts.remove(p)
496 break
496 break
497
497
498 if invalidconflicts:
498 if invalidconflicts:
499 for p in invalidconflicts:
499 for p in invalidconflicts:
500 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
500 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
501 raise error.Abort(_(b"destination manifest contains path conflicts"))
501 raise error.Abort(_(b"destination manifest contains path conflicts"))
502
502
503
503
504 def _filternarrowactions(narrowmatch, branchmerge, mresult):
504 def _filternarrowactions(narrowmatch, branchmerge, mresult):
505 """
505 """
506 Filters out actions that can be ignored because the repo is narrowed.
506 Filters out actions that can be ignored because the repo is narrowed.
507
507
508 Raise an exception if the merge cannot be completed because the repo is
508 Raise an exception if the merge cannot be completed because the repo is
509 narrowed.
509 narrowed.
510 """
510 """
511 # TODO: handle with nonconflicttypes
511 # TODO: handle with nonconflicttypes
512 nonconflicttypes = {
512 nonconflicttypes = {
513 mergestatemod.ACTION_ADD,
513 mergestatemod.ACTION_ADD,
514 mergestatemod.ACTION_ADD_MODIFIED,
514 mergestatemod.ACTION_ADD_MODIFIED,
515 mergestatemod.ACTION_CREATED,
515 mergestatemod.ACTION_CREATED,
516 mergestatemod.ACTION_CREATED_MERGE,
516 mergestatemod.ACTION_CREATED_MERGE,
517 mergestatemod.ACTION_FORGET,
517 mergestatemod.ACTION_FORGET,
518 mergestatemod.ACTION_GET,
518 mergestatemod.ACTION_GET,
519 mergestatemod.ACTION_REMOVE,
519 mergestatemod.ACTION_REMOVE,
520 mergestatemod.ACTION_EXEC,
520 mergestatemod.ACTION_EXEC,
521 }
521 }
522 # We mutate the items in the dict during iteration, so iterate
522 # We mutate the items in the dict during iteration, so iterate
523 # over a copy.
523 # over a copy.
524 for f, action in mresult.filemap():
524 for f, action in mresult.filemap():
525 if narrowmatch(f):
525 if narrowmatch(f):
526 pass
526 pass
527 elif not branchmerge:
527 elif not branchmerge:
528 mresult.removefile(f) # just updating, ignore changes outside clone
528 mresult.removefile(f) # just updating, ignore changes outside clone
529 elif action[0] in mergeresult.NO_OP_ACTIONS:
529 elif action[0] in mergestatemod.NO_OP_ACTIONS:
530 mresult.removefile(f) # merge does not affect file
530 mresult.removefile(f) # merge does not affect file
531 elif action[0] in nonconflicttypes:
531 elif action[0] in nonconflicttypes:
532 raise error.Abort(
532 raise error.Abort(
533 _(
533 _(
534 b'merge affects file \'%s\' outside narrow, '
534 b'merge affects file \'%s\' outside narrow, '
535 b'which is not yet supported'
535 b'which is not yet supported'
536 )
536 )
537 % f,
537 % f,
538 hint=_(b'merging in the other direction may work'),
538 hint=_(b'merging in the other direction may work'),
539 )
539 )
540 else:
540 else:
541 raise error.Abort(
541 raise error.Abort(
542 _(b'conflict in file \'%s\' is outside narrow clone') % f
542 _(b'conflict in file \'%s\' is outside narrow clone') % f
543 )
543 )
544
544
545
545
546 class mergeresult(object):
546 class mergeresult(object):
547 '''An object representing result of merging manifests.
547 '''An object representing result of merging manifests.
548
548
549 It has information about what actions need to be performed on dirstate,
549 It has information about what actions need to be performed on dirstate,
550 mapping of divergent renames and other such cases. '''
550 mapping of divergent renames and other such cases. '''
551
551
552 NO_OP_ACTIONS = (
553 mergestatemod.ACTION_KEEP,
554 mergestatemod.ACTION_KEEP_ABSENT,
555 mergestatemod.ACTION_KEEP_NEW,
556 )
557
558 def __init__(self):
552 def __init__(self):
559 """
553 """
560 filemapping: dict of filename as keys and action related info as values
554 filemapping: dict of filename as keys and action related info as values
561 diverge: mapping of source name -> list of dest name for
555 diverge: mapping of source name -> list of dest name for
562 divergent renames
556 divergent renames
563 renamedelete: mapping of source name -> list of destinations for files
557 renamedelete: mapping of source name -> list of destinations for files
564 deleted on one side and renamed on other.
558 deleted on one side and renamed on other.
565 commitinfo: dict containing data which should be used on commit
559 commitinfo: dict containing data which should be used on commit
566 contains a filename -> info mapping
560 contains a filename -> info mapping
567 actionmapping: dict of action names as keys and values are dict of
561 actionmapping: dict of action names as keys and values are dict of
568 filename as key and related data as values
562 filename as key and related data as values
569 """
563 """
570 self._filemapping = {}
564 self._filemapping = {}
571 self._diverge = {}
565 self._diverge = {}
572 self._renamedelete = {}
566 self._renamedelete = {}
573 self._commitinfo = collections.defaultdict(dict)
567 self._commitinfo = collections.defaultdict(dict)
574 self._actionmapping = collections.defaultdict(dict)
568 self._actionmapping = collections.defaultdict(dict)
575
569
576 def updatevalues(self, diverge, renamedelete):
570 def updatevalues(self, diverge, renamedelete):
577 self._diverge = diverge
571 self._diverge = diverge
578 self._renamedelete = renamedelete
572 self._renamedelete = renamedelete
579
573
580 def addfile(self, filename, action, data, message):
574 def addfile(self, filename, action, data, message):
581 """ adds a new file to the mergeresult object
575 """ adds a new file to the mergeresult object
582
576
583 filename: file which we are adding
577 filename: file which we are adding
584 action: one of mergestatemod.ACTION_*
578 action: one of mergestatemod.ACTION_*
585 data: a tuple of information like fctx and ctx related to this merge
579 data: a tuple of information like fctx and ctx related to this merge
586 message: a message about the merge
580 message: a message about the merge
587 """
581 """
588 # if the file already existed, we need to delete its old
582 # if the file already existed, we need to delete its old
589 # entry from _actionmapping too
583 # entry from _actionmapping too
590 if filename in self._filemapping:
584 if filename in self._filemapping:
591 a, d, m = self._filemapping[filename]
585 a, d, m = self._filemapping[filename]
592 del self._actionmapping[a][filename]
586 del self._actionmapping[a][filename]
593
587
594 self._filemapping[filename] = (action, data, message)
588 self._filemapping[filename] = (action, data, message)
595 self._actionmapping[action][filename] = (data, message)
589 self._actionmapping[action][filename] = (data, message)
596
590
597 def getfile(self, filename, default_return=None):
591 def getfile(self, filename, default_return=None):
598 """ returns (action, args, msg) about this file
592 """ returns (action, args, msg) about this file
599
593
600 returns default_return if the file is not present """
594 returns default_return if the file is not present """
601 if filename in self._filemapping:
595 if filename in self._filemapping:
602 return self._filemapping[filename]
596 return self._filemapping[filename]
603 return default_return
597 return default_return
604
598
605 def files(self, actions=None):
599 def files(self, actions=None):
606 """ returns files on which provided action needs to perfromed
600 """ returns files on which provided action needs to perfromed
607
601
608 If actions is None, all files are returned
602 If actions is None, all files are returned
609 """
603 """
610 # TODO: think whether we should return renamedelete and
604 # TODO: think whether we should return renamedelete and
611 # diverge filenames also
605 # diverge filenames also
612 if actions is None:
606 if actions is None:
613 for f in self._filemapping:
607 for f in self._filemapping:
614 yield f
608 yield f
615
609
616 else:
610 else:
617 for a in actions:
611 for a in actions:
618 for f in self._actionmapping[a]:
612 for f in self._actionmapping[a]:
619 yield f
613 yield f
620
614
621 def removefile(self, filename):
615 def removefile(self, filename):
622 """ removes a file from the mergeresult object as the file might
616 """ removes a file from the mergeresult object as the file might
623 not merging anymore """
617 not merging anymore """
624 action, data, message = self._filemapping[filename]
618 action, data, message = self._filemapping[filename]
625 del self._filemapping[filename]
619 del self._filemapping[filename]
626 del self._actionmapping[action][filename]
620 del self._actionmapping[action][filename]
627
621
628 def getactions(self, actions, sort=False):
622 def getactions(self, actions, sort=False):
629 """ get list of files which are marked with these actions
623 """ get list of files which are marked with these actions
630 if sort is true, files for each action are sorted and then added
624 if sort is true, files for each action are sorted and then added
631
625
632 Returns a list of tuple of form (filename, data, message)
626 Returns a list of tuple of form (filename, data, message)
633 """
627 """
634 for a in actions:
628 for a in actions:
635 if sort:
629 if sort:
636 for f in sorted(self._actionmapping[a]):
630 for f in sorted(self._actionmapping[a]):
637 args, msg = self._actionmapping[a][f]
631 args, msg = self._actionmapping[a][f]
638 yield f, args, msg
632 yield f, args, msg
639 else:
633 else:
640 for f, (args, msg) in pycompat.iteritems(
634 for f, (args, msg) in pycompat.iteritems(
641 self._actionmapping[a]
635 self._actionmapping[a]
642 ):
636 ):
643 yield f, args, msg
637 yield f, args, msg
644
638
645 def len(self, actions=None):
639 def len(self, actions=None):
646 """ returns number of files which needs actions
640 """ returns number of files which needs actions
647
641
648 if actions is passed, total of number of files in that action
642 if actions is passed, total of number of files in that action
649 only is returned """
643 only is returned """
650
644
651 if actions is None:
645 if actions is None:
652 return len(self._filemapping)
646 return len(self._filemapping)
653
647
654 return sum(len(self._actionmapping[a]) for a in actions)
648 return sum(len(self._actionmapping[a]) for a in actions)
655
649
656 def filemap(self, sort=False):
650 def filemap(self, sort=False):
657 if sort:
651 if sort:
658 for key, val in sorted(pycompat.iteritems(self._filemapping)):
652 for key, val in sorted(pycompat.iteritems(self._filemapping)):
659 yield key, val
653 yield key, val
660 else:
654 else:
661 for key, val in pycompat.iteritems(self._filemapping):
655 for key, val in pycompat.iteritems(self._filemapping):
662 yield key, val
656 yield key, val
663
657
664 def addcommitinfo(self, filename, key, value):
658 def addcommitinfo(self, filename, key, value):
665 """ adds key-value information about filename which will be required
659 """ adds key-value information about filename which will be required
666 while committing this merge """
660 while committing this merge """
667 self._commitinfo[filename][key] = value
661 self._commitinfo[filename][key] = value
668
662
669 @property
663 @property
670 def diverge(self):
664 def diverge(self):
671 return self._diverge
665 return self._diverge
672
666
673 @property
667 @property
674 def renamedelete(self):
668 def renamedelete(self):
675 return self._renamedelete
669 return self._renamedelete
676
670
677 @property
671 @property
678 def commitinfo(self):
672 def commitinfo(self):
679 return self._commitinfo
673 return self._commitinfo
680
674
681 @property
675 @property
682 def actionsdict(self):
676 def actionsdict(self):
683 """ returns a dictionary of actions to be perfomed with action as key
677 """ returns a dictionary of actions to be perfomed with action as key
684 and a list of files and related arguments as values """
678 and a list of files and related arguments as values """
685 res = collections.defaultdict(list)
679 res = collections.defaultdict(list)
686 for a, d in pycompat.iteritems(self._actionmapping):
680 for a, d in pycompat.iteritems(self._actionmapping):
687 for f, (args, msg) in pycompat.iteritems(d):
681 for f, (args, msg) in pycompat.iteritems(d):
688 res[a].append((f, args, msg))
682 res[a].append((f, args, msg))
689 return res
683 return res
690
684
691 def setactions(self, actions):
685 def setactions(self, actions):
692 self._filemapping = actions
686 self._filemapping = actions
693 self._actionmapping = collections.defaultdict(dict)
687 self._actionmapping = collections.defaultdict(dict)
694 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
688 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
695 self._actionmapping[act][f] = data, msg
689 self._actionmapping[act][f] = data, msg
696
690
697 def hasconflicts(self):
691 def hasconflicts(self):
698 """ tells whether this merge resulted in some actions which can
692 """ tells whether this merge resulted in some actions which can
699 lead to conflicts or not """
693 lead to conflicts or not """
700 for a in self._actionmapping.keys():
694 for a in self._actionmapping.keys():
701 if (
695 if (
702 a
696 a
703 not in (
697 not in (
704 mergestatemod.ACTION_GET,
698 mergestatemod.ACTION_GET,
705 mergestatemod.ACTION_EXEC,
699 mergestatemod.ACTION_EXEC,
706 mergestatemod.ACTION_REMOVE,
700 mergestatemod.ACTION_REMOVE,
707 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
701 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
708 )
702 )
709 and self._actionmapping[a]
703 and self._actionmapping[a]
710 and a not in self.NO_OP_ACTIONS
704 and a not in mergestatemod.NO_OP_ACTIONS
711 ):
705 ):
712 return True
706 return True
713
707
714 return False
708 return False
715
709
716
710
717 def manifestmerge(
711 def manifestmerge(
718 repo,
712 repo,
719 wctx,
713 wctx,
720 p2,
714 p2,
721 pa,
715 pa,
722 branchmerge,
716 branchmerge,
723 force,
717 force,
724 matcher,
718 matcher,
725 acceptremote,
719 acceptremote,
726 followcopies,
720 followcopies,
727 forcefulldiff=False,
721 forcefulldiff=False,
728 ):
722 ):
729 """
723 """
730 Merge wctx and p2 with ancestor pa and generate merge action list
724 Merge wctx and p2 with ancestor pa and generate merge action list
731
725
732 branchmerge and force are as passed in to update
726 branchmerge and force are as passed in to update
733 matcher = matcher to filter file lists
727 matcher = matcher to filter file lists
734 acceptremote = accept the incoming changes without prompting
728 acceptremote = accept the incoming changes without prompting
735
729
736 Returns an object of mergeresult class
730 Returns an object of mergeresult class
737 """
731 """
738 mresult = mergeresult()
732 mresult = mergeresult()
739 if matcher is not None and matcher.always():
733 if matcher is not None and matcher.always():
740 matcher = None
734 matcher = None
741
735
742 # manifests fetched in order are going to be faster, so prime the caches
736 # manifests fetched in order are going to be faster, so prime the caches
743 [
737 [
744 x.manifest()
738 x.manifest()
745 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
739 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
746 ]
740 ]
747
741
748 branch_copies1 = copies.branch_copies()
742 branch_copies1 = copies.branch_copies()
749 branch_copies2 = copies.branch_copies()
743 branch_copies2 = copies.branch_copies()
750 diverge = {}
744 diverge = {}
751 # information from merge which is needed at commit time
745 # information from merge which is needed at commit time
752 # for example choosing filelog of which parent to commit
746 # for example choosing filelog of which parent to commit
753 # TODO: use specific constants in future for this mapping
747 # TODO: use specific constants in future for this mapping
754 if followcopies:
748 if followcopies:
755 branch_copies1, branch_copies2, diverge = copies.mergecopies(
749 branch_copies1, branch_copies2, diverge = copies.mergecopies(
756 repo, wctx, p2, pa
750 repo, wctx, p2, pa
757 )
751 )
758
752
759 boolbm = pycompat.bytestr(bool(branchmerge))
753 boolbm = pycompat.bytestr(bool(branchmerge))
760 boolf = pycompat.bytestr(bool(force))
754 boolf = pycompat.bytestr(bool(force))
761 boolm = pycompat.bytestr(bool(matcher))
755 boolm = pycompat.bytestr(bool(matcher))
762 repo.ui.note(_(b"resolving manifests\n"))
756 repo.ui.note(_(b"resolving manifests\n"))
763 repo.ui.debug(
757 repo.ui.debug(
764 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
758 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
765 )
759 )
766 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
760 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
767
761
768 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
762 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
769 copied1 = set(branch_copies1.copy.values())
763 copied1 = set(branch_copies1.copy.values())
770 copied1.update(branch_copies1.movewithdir.values())
764 copied1.update(branch_copies1.movewithdir.values())
771 copied2 = set(branch_copies2.copy.values())
765 copied2 = set(branch_copies2.copy.values())
772 copied2.update(branch_copies2.movewithdir.values())
766 copied2.update(branch_copies2.movewithdir.values())
773
767
774 if b'.hgsubstate' in m1 and wctx.rev() is None:
768 if b'.hgsubstate' in m1 and wctx.rev() is None:
775 # Check whether sub state is modified, and overwrite the manifest
769 # Check whether sub state is modified, and overwrite the manifest
776 # to flag the change. If wctx is a committed revision, we shouldn't
770 # to flag the change. If wctx is a committed revision, we shouldn't
777 # care for the dirty state of the working directory.
771 # care for the dirty state of the working directory.
778 if any(wctx.sub(s).dirty() for s in wctx.substate):
772 if any(wctx.sub(s).dirty() for s in wctx.substate):
779 m1[b'.hgsubstate'] = modifiednodeid
773 m1[b'.hgsubstate'] = modifiednodeid
780
774
781 # Don't use m2-vs-ma optimization if:
775 # Don't use m2-vs-ma optimization if:
782 # - ma is the same as m1 or m2, which we're just going to diff again later
776 # - ma is the same as m1 or m2, which we're just going to diff again later
783 # - The caller specifically asks for a full diff, which is useful during bid
777 # - The caller specifically asks for a full diff, which is useful during bid
784 # merge.
778 # merge.
785 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
779 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
786 # Identify which files are relevant to the merge, so we can limit the
780 # Identify which files are relevant to the merge, so we can limit the
787 # total m1-vs-m2 diff to just those files. This has significant
781 # total m1-vs-m2 diff to just those files. This has significant
788 # performance benefits in large repositories.
782 # performance benefits in large repositories.
789 relevantfiles = set(ma.diff(m2).keys())
783 relevantfiles = set(ma.diff(m2).keys())
790
784
791 # For copied and moved files, we need to add the source file too.
785 # For copied and moved files, we need to add the source file too.
792 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
786 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
793 if copyvalue in relevantfiles:
787 if copyvalue in relevantfiles:
794 relevantfiles.add(copykey)
788 relevantfiles.add(copykey)
795 for movedirkey in branch_copies1.movewithdir:
789 for movedirkey in branch_copies1.movewithdir:
796 relevantfiles.add(movedirkey)
790 relevantfiles.add(movedirkey)
797 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
791 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
798 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
792 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
799
793
800 diff = m1.diff(m2, match=matcher)
794 diff = m1.diff(m2, match=matcher)
801
795
802 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
796 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
803 if n1 and n2: # file exists on both local and remote side
797 if n1 and n2: # file exists on both local and remote side
804 if f not in ma:
798 if f not in ma:
805 # TODO: what if they're renamed from different sources?
799 # TODO: what if they're renamed from different sources?
806 fa = branch_copies1.copy.get(
800 fa = branch_copies1.copy.get(
807 f, None
801 f, None
808 ) or branch_copies2.copy.get(f, None)
802 ) or branch_copies2.copy.get(f, None)
809 args, msg = None, None
803 args, msg = None, None
810 if fa is not None:
804 if fa is not None:
811 args = (f, f, fa, False, pa.node())
805 args = (f, f, fa, False, pa.node())
812 msg = b'both renamed from %s' % fa
806 msg = b'both renamed from %s' % fa
813 else:
807 else:
814 args = (f, f, None, False, pa.node())
808 args = (f, f, None, False, pa.node())
815 msg = b'both created'
809 msg = b'both created'
816 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
810 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
817 else:
811 else:
818 a = ma[f]
812 a = ma[f]
819 fla = ma.flags(f)
813 fla = ma.flags(f)
820 nol = b'l' not in fl1 + fl2 + fla
814 nol = b'l' not in fl1 + fl2 + fla
821 if n2 == a and fl2 == fla:
815 if n2 == a and fl2 == fla:
822 mresult.addfile(
816 mresult.addfile(
823 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
817 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
824 )
818 )
825 elif n1 == a and fl1 == fla: # local unchanged - use remote
819 elif n1 == a and fl1 == fla: # local unchanged - use remote
826 if n1 == n2: # optimization: keep local content
820 if n1 == n2: # optimization: keep local content
827 mresult.addfile(
821 mresult.addfile(
828 f,
822 f,
829 mergestatemod.ACTION_EXEC,
823 mergestatemod.ACTION_EXEC,
830 (fl2,),
824 (fl2,),
831 b'update permissions',
825 b'update permissions',
832 )
826 )
833 else:
827 else:
834 mresult.addfile(
828 mresult.addfile(
835 f,
829 f,
836 mergestatemod.ACTION_GET,
830 mergestatemod.ACTION_GET,
837 (fl2, False),
831 (fl2, False),
838 b'remote is newer',
832 b'remote is newer',
839 )
833 )
840 if branchmerge:
834 if branchmerge:
841 mresult.addcommitinfo(
835 mresult.addcommitinfo(
842 f, b'filenode-source', b'other'
836 f, b'filenode-source', b'other'
843 )
837 )
844 elif nol and n2 == a: # remote only changed 'x'
838 elif nol and n2 == a: # remote only changed 'x'
845 mresult.addfile(
839 mresult.addfile(
846 f,
840 f,
847 mergestatemod.ACTION_EXEC,
841 mergestatemod.ACTION_EXEC,
848 (fl2,),
842 (fl2,),
849 b'update permissions',
843 b'update permissions',
850 )
844 )
851 elif nol and n1 == a: # local only changed 'x'
845 elif nol and n1 == a: # local only changed 'x'
852 mresult.addfile(
846 mresult.addfile(
853 f,
847 f,
854 mergestatemod.ACTION_GET,
848 mergestatemod.ACTION_GET,
855 (fl1, False),
849 (fl1, False),
856 b'remote is newer',
850 b'remote is newer',
857 )
851 )
858 if branchmerge:
852 if branchmerge:
859 mresult.addcommitinfo(f, b'filenode-source', b'other')
853 mresult.addcommitinfo(f, b'filenode-source', b'other')
860 else: # both changed something
854 else: # both changed something
861 mresult.addfile(
855 mresult.addfile(
862 f,
856 f,
863 mergestatemod.ACTION_MERGE,
857 mergestatemod.ACTION_MERGE,
864 (f, f, f, False, pa.node()),
858 (f, f, f, False, pa.node()),
865 b'versions differ',
859 b'versions differ',
866 )
860 )
867 elif n1: # file exists only on local side
861 elif n1: # file exists only on local side
868 if f in copied2:
862 if f in copied2:
869 pass # we'll deal with it on m2 side
863 pass # we'll deal with it on m2 side
870 elif (
864 elif (
871 f in branch_copies1.movewithdir
865 f in branch_copies1.movewithdir
872 ): # directory rename, move local
866 ): # directory rename, move local
873 f2 = branch_copies1.movewithdir[f]
867 f2 = branch_copies1.movewithdir[f]
874 if f2 in m2:
868 if f2 in m2:
875 mresult.addfile(
869 mresult.addfile(
876 f2,
870 f2,
877 mergestatemod.ACTION_MERGE,
871 mergestatemod.ACTION_MERGE,
878 (f, f2, None, True, pa.node()),
872 (f, f2, None, True, pa.node()),
879 b'remote directory rename, both created',
873 b'remote directory rename, both created',
880 )
874 )
881 else:
875 else:
882 mresult.addfile(
876 mresult.addfile(
883 f2,
877 f2,
884 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
878 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
885 (f, fl1),
879 (f, fl1),
886 b'remote directory rename - move from %s' % f,
880 b'remote directory rename - move from %s' % f,
887 )
881 )
888 elif f in branch_copies1.copy:
882 elif f in branch_copies1.copy:
889 f2 = branch_copies1.copy[f]
883 f2 = branch_copies1.copy[f]
890 mresult.addfile(
884 mresult.addfile(
891 f,
885 f,
892 mergestatemod.ACTION_MERGE,
886 mergestatemod.ACTION_MERGE,
893 (f, f2, f2, False, pa.node()),
887 (f, f2, f2, False, pa.node()),
894 b'local copied/moved from %s' % f2,
888 b'local copied/moved from %s' % f2,
895 )
889 )
896 elif f in ma: # clean, a different, no remote
890 elif f in ma: # clean, a different, no remote
897 if n1 != ma[f]:
891 if n1 != ma[f]:
898 if acceptremote:
892 if acceptremote:
899 mresult.addfile(
893 mresult.addfile(
900 f,
894 f,
901 mergestatemod.ACTION_REMOVE,
895 mergestatemod.ACTION_REMOVE,
902 None,
896 None,
903 b'remote delete',
897 b'remote delete',
904 )
898 )
905 else:
899 else:
906 mresult.addfile(
900 mresult.addfile(
907 f,
901 f,
908 mergestatemod.ACTION_CHANGED_DELETED,
902 mergestatemod.ACTION_CHANGED_DELETED,
909 (f, None, f, False, pa.node()),
903 (f, None, f, False, pa.node()),
910 b'prompt changed/deleted',
904 b'prompt changed/deleted',
911 )
905 )
912 elif n1 == addednodeid:
906 elif n1 == addednodeid:
913 # This file was locally added. We should forget it instead of
907 # This file was locally added. We should forget it instead of
914 # deleting it.
908 # deleting it.
915 mresult.addfile(
909 mresult.addfile(
916 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
910 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
917 )
911 )
918 else:
912 else:
919 mresult.addfile(
913 mresult.addfile(
920 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
914 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
921 )
915 )
922 else: # file not in ancestor, not in remote
916 else: # file not in ancestor, not in remote
923 mresult.addfile(
917 mresult.addfile(
924 f,
918 f,
925 mergestatemod.ACTION_KEEP_NEW,
919 mergestatemod.ACTION_KEEP_NEW,
926 None,
920 None,
927 b'ancestor missing, remote missing',
921 b'ancestor missing, remote missing',
928 )
922 )
929
923
930 elif n2: # file exists only on remote side
924 elif n2: # file exists only on remote side
931 if f in copied1:
925 if f in copied1:
932 pass # we'll deal with it on m1 side
926 pass # we'll deal with it on m1 side
933 elif f in branch_copies2.movewithdir:
927 elif f in branch_copies2.movewithdir:
934 f2 = branch_copies2.movewithdir[f]
928 f2 = branch_copies2.movewithdir[f]
935 if f2 in m1:
929 if f2 in m1:
936 mresult.addfile(
930 mresult.addfile(
937 f2,
931 f2,
938 mergestatemod.ACTION_MERGE,
932 mergestatemod.ACTION_MERGE,
939 (f2, f, None, False, pa.node()),
933 (f2, f, None, False, pa.node()),
940 b'local directory rename, both created',
934 b'local directory rename, both created',
941 )
935 )
942 else:
936 else:
943 mresult.addfile(
937 mresult.addfile(
944 f2,
938 f2,
945 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
939 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
946 (f, fl2),
940 (f, fl2),
947 b'local directory rename - get from %s' % f,
941 b'local directory rename - get from %s' % f,
948 )
942 )
949 elif f in branch_copies2.copy:
943 elif f in branch_copies2.copy:
950 f2 = branch_copies2.copy[f]
944 f2 = branch_copies2.copy[f]
951 msg, args = None, None
945 msg, args = None, None
952 if f2 in m2:
946 if f2 in m2:
953 args = (f2, f, f2, False, pa.node())
947 args = (f2, f, f2, False, pa.node())
954 msg = b'remote copied from %s' % f2
948 msg = b'remote copied from %s' % f2
955 else:
949 else:
956 args = (f2, f, f2, True, pa.node())
950 args = (f2, f, f2, True, pa.node())
957 msg = b'remote moved from %s' % f2
951 msg = b'remote moved from %s' % f2
958 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
952 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
959 elif f not in ma:
953 elif f not in ma:
960 # local unknown, remote created: the logic is described by the
954 # local unknown, remote created: the logic is described by the
961 # following table:
955 # following table:
962 #
956 #
963 # force branchmerge different | action
957 # force branchmerge different | action
964 # n * * | create
958 # n * * | create
965 # y n * | create
959 # y n * | create
966 # y y n | create
960 # y y n | create
967 # y y y | merge
961 # y y y | merge
968 #
962 #
969 # Checking whether the files are different is expensive, so we
963 # Checking whether the files are different is expensive, so we
970 # don't do that when we can avoid it.
964 # don't do that when we can avoid it.
971 if not force:
965 if not force:
972 mresult.addfile(
966 mresult.addfile(
973 f,
967 f,
974 mergestatemod.ACTION_CREATED,
968 mergestatemod.ACTION_CREATED,
975 (fl2,),
969 (fl2,),
976 b'remote created',
970 b'remote created',
977 )
971 )
978 elif not branchmerge:
972 elif not branchmerge:
979 mresult.addfile(
973 mresult.addfile(
980 f,
974 f,
981 mergestatemod.ACTION_CREATED,
975 mergestatemod.ACTION_CREATED,
982 (fl2,),
976 (fl2,),
983 b'remote created',
977 b'remote created',
984 )
978 )
985 else:
979 else:
986 mresult.addfile(
980 mresult.addfile(
987 f,
981 f,
988 mergestatemod.ACTION_CREATED_MERGE,
982 mergestatemod.ACTION_CREATED_MERGE,
989 (fl2, pa.node()),
983 (fl2, pa.node()),
990 b'remote created, get or merge',
984 b'remote created, get or merge',
991 )
985 )
992 elif n2 != ma[f]:
986 elif n2 != ma[f]:
993 df = None
987 df = None
994 for d in branch_copies1.dirmove:
988 for d in branch_copies1.dirmove:
995 if f.startswith(d):
989 if f.startswith(d):
996 # new file added in a directory that was moved
990 # new file added in a directory that was moved
997 df = branch_copies1.dirmove[d] + f[len(d) :]
991 df = branch_copies1.dirmove[d] + f[len(d) :]
998 break
992 break
999 if df is not None and df in m1:
993 if df is not None and df in m1:
1000 mresult.addfile(
994 mresult.addfile(
1001 df,
995 df,
1002 mergestatemod.ACTION_MERGE,
996 mergestatemod.ACTION_MERGE,
1003 (df, f, f, False, pa.node()),
997 (df, f, f, False, pa.node()),
1004 b'local directory rename - respect move '
998 b'local directory rename - respect move '
1005 b'from %s' % f,
999 b'from %s' % f,
1006 )
1000 )
1007 elif acceptremote:
1001 elif acceptremote:
1008 mresult.addfile(
1002 mresult.addfile(
1009 f,
1003 f,
1010 mergestatemod.ACTION_CREATED,
1004 mergestatemod.ACTION_CREATED,
1011 (fl2,),
1005 (fl2,),
1012 b'remote recreating',
1006 b'remote recreating',
1013 )
1007 )
1014 else:
1008 else:
1015 mresult.addfile(
1009 mresult.addfile(
1016 f,
1010 f,
1017 mergestatemod.ACTION_DELETED_CHANGED,
1011 mergestatemod.ACTION_DELETED_CHANGED,
1018 (None, f, f, False, pa.node()),
1012 (None, f, f, False, pa.node()),
1019 b'prompt deleted/changed',
1013 b'prompt deleted/changed',
1020 )
1014 )
1021 else:
1015 else:
1022 mresult.addfile(
1016 mresult.addfile(
1023 f,
1017 f,
1024 mergestatemod.ACTION_KEEP_ABSENT,
1018 mergestatemod.ACTION_KEEP_ABSENT,
1025 None,
1019 None,
1026 b'local not present, remote unchanged',
1020 b'local not present, remote unchanged',
1027 )
1021 )
1028
1022
1029 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1023 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1030 # If we are merging, look for path conflicts.
1024 # If we are merging, look for path conflicts.
1031 checkpathconflicts(repo, wctx, p2, mresult)
1025 checkpathconflicts(repo, wctx, p2, mresult)
1032
1026
1033 narrowmatch = repo.narrowmatch()
1027 narrowmatch = repo.narrowmatch()
1034 if not narrowmatch.always():
1028 if not narrowmatch.always():
1035 # Updates mresult in place
1029 # Updates mresult in place
1036 _filternarrowactions(narrowmatch, branchmerge, mresult)
1030 _filternarrowactions(narrowmatch, branchmerge, mresult)
1037
1031
1038 renamedelete = branch_copies1.renamedelete
1032 renamedelete = branch_copies1.renamedelete
1039 renamedelete.update(branch_copies2.renamedelete)
1033 renamedelete.update(branch_copies2.renamedelete)
1040
1034
1041 mresult.updatevalues(diverge, renamedelete)
1035 mresult.updatevalues(diverge, renamedelete)
1042 return mresult
1036 return mresult
1043
1037
1044
1038
1045 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1039 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1046 """Resolves false conflicts where the nodeid changed but the content
1040 """Resolves false conflicts where the nodeid changed but the content
1047 remained the same."""
1041 remained the same."""
1048 # We force a copy of actions.items() because we're going to mutate
1042 # We force a copy of actions.items() because we're going to mutate
1049 # actions as we resolve trivial conflicts.
1043 # actions as we resolve trivial conflicts.
1050 for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
1044 for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
1051 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1045 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1052 # local did change but ended up with same content
1046 # local did change but ended up with same content
1053 mresult.addfile(
1047 mresult.addfile(
1054 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1048 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1055 )
1049 )
1056
1050
1057 for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
1051 for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
1058 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1052 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1059 # remote did change but ended up with same content
1053 # remote did change but ended up with same content
1060 mresult.removefile(f) # don't get = keep local deleted
1054 mresult.removefile(f) # don't get = keep local deleted
1061
1055
1062
1056
1063 def calculateupdates(
1057 def calculateupdates(
1064 repo,
1058 repo,
1065 wctx,
1059 wctx,
1066 mctx,
1060 mctx,
1067 ancestors,
1061 ancestors,
1068 branchmerge,
1062 branchmerge,
1069 force,
1063 force,
1070 acceptremote,
1064 acceptremote,
1071 followcopies,
1065 followcopies,
1072 matcher=None,
1066 matcher=None,
1073 mergeforce=False,
1067 mergeforce=False,
1074 ):
1068 ):
1075 """
1069 """
1076 Calculate the actions needed to merge mctx into wctx using ancestors
1070 Calculate the actions needed to merge mctx into wctx using ancestors
1077
1071
1078 Uses manifestmerge() to merge the manifests and get the list of actions required
1072 Uses manifestmerge() to merge the manifests and get the list of actions required
1079 to perform the merge of the two manifests. If there are multiple ancestors, uses bid
1073 to perform the merge of the two manifests. If there are multiple ancestors, uses bid
1080 merge if enabled.
1074 merge if enabled.
1081
1075
1082 Also filters out actions which are not required if the repository is sparse.
1076 Also filters out actions which are not required if the repository is sparse.
1083
1077
1084 Returns mergeresult object same as manifestmerge().
1078 Returns mergeresult object same as manifestmerge().
1085 """
1079 """
1086 # Avoid cycle.
1080 # Avoid cycle.
1087 from . import sparse
1081 from . import sparse
1088
1082
1089 mresult = None
1083 mresult = None
1090 if len(ancestors) == 1: # default
1084 if len(ancestors) == 1: # default
1091 mresult = manifestmerge(
1085 mresult = manifestmerge(
1092 repo,
1086 repo,
1093 wctx,
1087 wctx,
1094 mctx,
1088 mctx,
1095 ancestors[0],
1089 ancestors[0],
1096 branchmerge,
1090 branchmerge,
1097 force,
1091 force,
1098 matcher,
1092 matcher,
1099 acceptremote,
1093 acceptremote,
1100 followcopies,
1094 followcopies,
1101 )
1095 )
1102 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1096 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1103
1097
1104 else: # only when merge.preferancestor=* - the default
1098 else: # only when merge.preferancestor=* - the default
1105 repo.ui.note(
1099 repo.ui.note(
1106 _(b"note: merging %s and %s using bids from ancestors %s\n")
1100 _(b"note: merging %s and %s using bids from ancestors %s\n")
1107 % (
1101 % (
1108 wctx,
1102 wctx,
1109 mctx,
1103 mctx,
1110 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1104 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1111 )
1105 )
1112 )
1106 )
1113
1107
1114 # mapping filename to bids (action method to list of actions)
1108 # mapping filename to bids (action method to list of actions)
1115 # {FILENAME1 : BID1, FILENAME2 : BID2}
1109 # {FILENAME1 : BID1, FILENAME2 : BID2}
1116 # BID is another dictionary which contains
1110 # BID is another dictionary which contains
1117 # a mapping of the following form:
1111 # a mapping of the following form:
1118 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1112 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
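# [editorial sketch, not part of merge.py] For illustration only: after the
# per-ancestor loop below, fbids might look roughly like
#     {b'foo.txt': {mergestatemod.ACTION_GET: [(ACTION_GET, args, msg)],
#                   mergestatemod.ACTION_KEEP: [(ACTION_KEEP, None, msg)]}}
# where each list entry is the (action, args, msg) tuple obtained from
# mresult1.filemap(); the filename and messages here are made up.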
1119 fbids = {}
1113 fbids = {}
1120 mresult = mergeresult()
1114 mresult = mergeresult()
1121 diverge, renamedelete = None, None
1115 diverge, renamedelete = None, None
1122 for ancestor in ancestors:
1116 for ancestor in ancestors:
1123 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1117 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1124 mresult1 = manifestmerge(
1118 mresult1 = manifestmerge(
1125 repo,
1119 repo,
1126 wctx,
1120 wctx,
1127 mctx,
1121 mctx,
1128 ancestor,
1122 ancestor,
1129 branchmerge,
1123 branchmerge,
1130 force,
1124 force,
1131 matcher,
1125 matcher,
1132 acceptremote,
1126 acceptremote,
1133 followcopies,
1127 followcopies,
1134 forcefulldiff=True,
1128 forcefulldiff=True,
1135 )
1129 )
1136 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1130 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1137
1131
1138 # Track the shortest set of warnings on the theory that bid
1132 # Track the shortest set of warnings on the theory that bid
1139 # merge will correctly incorporate more information
1133 # merge will correctly incorporate more information
1140 if diverge is None or len(mresult1.diverge) < len(diverge):
1134 if diverge is None or len(mresult1.diverge) < len(diverge):
1141 diverge = mresult1.diverge
1135 diverge = mresult1.diverge
1142 if renamedelete is None or len(renamedelete) < len(
1136 if renamedelete is None or len(renamedelete) < len(
1143 mresult1.renamedelete
1137 mresult1.renamedelete
1144 ):
1138 ):
1145 renamedelete = mresult1.renamedelete
1139 renamedelete = mresult1.renamedelete
1146
1140
1147 # blindly update final mergeresult commitinfo with what we get
1141 # blindly update final mergeresult commitinfo with what we get
1148 # from mergeresult object for each ancestor
1142 # from mergeresult object for each ancestor
1149 # TODO: some commitinfo depends on what bid merge chooses and hence
1143 # TODO: some commitinfo depends on what bid merge chooses and hence
1150 # we will need to make commitinfo also depend on bid merge logic
1144 # we will need to make commitinfo also depend on bid merge logic
1151 mresult._commitinfo.update(mresult1._commitinfo)
1145 mresult._commitinfo.update(mresult1._commitinfo)
1152
1146
1153 for f, a in mresult1.filemap(sort=True):
1147 for f, a in mresult1.filemap(sort=True):
1154 m, args, msg = a
1148 m, args, msg = a
1155 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1149 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1156 if f in fbids:
1150 if f in fbids:
1157 d = fbids[f]
1151 d = fbids[f]
1158 if m in d:
1152 if m in d:
1159 d[m].append(a)
1153 d[m].append(a)
1160 else:
1154 else:
1161 d[m] = [a]
1155 d[m] = [a]
1162 else:
1156 else:
1163 fbids[f] = {m: [a]}
1157 fbids[f] = {m: [a]}
1164
1158
1165 # Call for bids
1159 # Call for bids
1166 # Pick the best bid for each file
1160 # Pick the best bid for each file
1167 repo.ui.note(
1161 repo.ui.note(
1168 _(b'\nauction for merging merge bids (%d ancestors)\n')
1162 _(b'\nauction for merging merge bids (%d ancestors)\n')
1169 % len(ancestors)
1163 % len(ancestors)
1170 )
1164 )
1171 for f, bids in sorted(fbids.items()):
1165 for f, bids in sorted(fbids.items()):
1172 if repo.ui.debugflag:
1166 if repo.ui.debugflag:
1173 repo.ui.debug(b" list of bids for %s:\n" % f)
1167 repo.ui.debug(b" list of bids for %s:\n" % f)
1174 for m, l in sorted(bids.items()):
1168 for m, l in sorted(bids.items()):
1175 for _f, args, msg in l:
1169 for _f, args, msg in l:
1176 repo.ui.debug(b' %s -> %s\n' % (msg, m))
1170 repo.ui.debug(b' %s -> %s\n' % (msg, m))
1177 # bids is a mapping from action method to list of actions
1171 # bids is a mapping from action method to list of actions
1178 # Consensus?
1172 # Consensus?
1179 if len(bids) == 1: # all bids are the same kind of method
1173 if len(bids) == 1: # all bids are the same kind of method
1180 m, l = list(bids.items())[0]
1174 m, l = list(bids.items())[0]
1181 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1175 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1182 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1176 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1183 mresult.addfile(f, *l[0])
1177 mresult.addfile(f, *l[0])
1184 continue
1178 continue
1185 # If keep is an option, just do it.
1179 # If keep is an option, just do it.
1186 if mergestatemod.ACTION_KEEP in bids:
1180 if mergestatemod.ACTION_KEEP in bids:
1187 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1181 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1188 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1182 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1189 continue
1183 continue
1190 # If keep absent is an option, just do that
1184 # If keep absent is an option, just do that
1191 if mergestatemod.ACTION_KEEP_ABSENT in bids:
1185 if mergestatemod.ACTION_KEEP_ABSENT in bids:
1192 repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f)
1186 repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f)
1193 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0])
1187 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0])
1194 continue
1188 continue
1195 # If keep new is an option, let's just do that
1189 # If keep new is an option, let's just do that
1196 if mergestatemod.ACTION_KEEP_NEW in bids:
1190 if mergestatemod.ACTION_KEEP_NEW in bids:
1197 repo.ui.note(_(b" %s: picking 'keep new' action\n") % f)
1191 repo.ui.note(_(b" %s: picking 'keep new' action\n") % f)
1198 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_NEW][0])
1192 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_NEW][0])
1199 continue
1193 continue
1200 # If there are gets and they all agree [how could they not?], do it.
1194 # If there are gets and they all agree [how could they not?], do it.
1201 if mergestatemod.ACTION_GET in bids:
1195 if mergestatemod.ACTION_GET in bids:
1202 ga0 = bids[mergestatemod.ACTION_GET][0]
1196 ga0 = bids[mergestatemod.ACTION_GET][0]
1203 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1197 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1204 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1198 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1205 mresult.addfile(f, *ga0)
1199 mresult.addfile(f, *ga0)
1206 continue
1200 continue
1207 # TODO: Consider other simple actions such as mode changes
1201 # TODO: Consider other simple actions such as mode changes
1208 # Handle inefficient democrazy.
1202 # Handle inefficient democrazy.
1209 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1203 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1210 for m, l in sorted(bids.items()):
1204 for m, l in sorted(bids.items()):
1211 for _f, args, msg in l:
1205 for _f, args, msg in l:
1212 repo.ui.note(b' %s -> %s\n' % (msg, m))
1206 repo.ui.note(b' %s -> %s\n' % (msg, m))
1213 # Pick random action. TODO: Instead, prompt user when resolving
1207 # Pick random action. TODO: Instead, prompt user when resolving
1214 m, l = list(bids.items())[0]
1208 m, l = list(bids.items())[0]
1215 repo.ui.warn(
1209 repo.ui.warn(
1216 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1210 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1217 )
1211 )
1218 mresult.addfile(f, *l[0])
1212 mresult.addfile(f, *l[0])
1219 continue
1213 continue
1220 repo.ui.note(_(b'end of auction\n\n'))
1214 repo.ui.note(_(b'end of auction\n\n'))
1221 mresult.updatevalues(diverge, renamedelete)
1215 mresult.updatevalues(diverge, renamedelete)
1222
1216
1223 if wctx.rev() is None:
1217 if wctx.rev() is None:
1224 _forgetremoved(wctx, mctx, branchmerge, mresult)
1218 _forgetremoved(wctx, mctx, branchmerge, mresult)
1225
1219
1226 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1220 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1227 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1221 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1228
1222
1229 return mresult
1223 return mresult
1230
1224
1231
1225
1232 def _getcwd():
1226 def _getcwd():
1233 try:
1227 try:
1234 return encoding.getcwd()
1228 return encoding.getcwd()
1235 except OSError as err:
1229 except OSError as err:
1236 if err.errno == errno.ENOENT:
1230 if err.errno == errno.ENOENT:
1237 return None
1231 return None
1238 raise
1232 raise
1239
1233
1240
1234
1241 def batchremove(repo, wctx, actions):
1235 def batchremove(repo, wctx, actions):
1242 """apply removes to the working directory
1236 """apply removes to the working directory
1243
1237
1244 yields tuples for progress updates
1238 yields tuples for progress updates
1245 """
1239 """
1246 verbose = repo.ui.verbose
1240 verbose = repo.ui.verbose
1247 cwd = _getcwd()
1241 cwd = _getcwd()
1248 i = 0
1242 i = 0
1249 for f, args, msg in actions:
1243 for f, args, msg in actions:
1250 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1244 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1251 if verbose:
1245 if verbose:
1252 repo.ui.note(_(b"removing %s\n") % f)
1246 repo.ui.note(_(b"removing %s\n") % f)
1253 wctx[f].audit()
1247 wctx[f].audit()
1254 try:
1248 try:
1255 wctx[f].remove(ignoremissing=True)
1249 wctx[f].remove(ignoremissing=True)
1256 except OSError as inst:
1250 except OSError as inst:
1257 repo.ui.warn(
1251 repo.ui.warn(
1258 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1252 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1259 )
1253 )
1260 if i == 100:
1254 if i == 100:
1261 yield i, f
1255 yield i, f
1262 i = 0
1256 i = 0
1263 i += 1
1257 i += 1
1264 if i > 0:
1258 if i > 0:
1265 yield i, f
1259 yield i, f
1266
1260
1267 if cwd and not _getcwd():
1261 if cwd and not _getcwd():
1268 # cwd was removed in the course of removing files; print a helpful
1262 # cwd was removed in the course of removing files; print a helpful
1269 # warning.
1263 # warning.
1270 repo.ui.warn(
1264 repo.ui.warn(
1271 _(
1265 _(
1272 b"current directory was removed\n"
1266 b"current directory was removed\n"
1273 b"(consider changing to repo root: %s)\n"
1267 b"(consider changing to repo root: %s)\n"
1274 )
1268 )
1275 % repo.root
1269 % repo.root
1276 )
1270 )
1277
1271
1278
1272
1279 def batchget(repo, mctx, wctx, wantfiledata, actions):
1273 def batchget(repo, mctx, wctx, wantfiledata, actions):
1280 """apply gets to the working directory
1274 """apply gets to the working directory
1281
1275
1282 mctx is the context to get from
1276 mctx is the context to get from
1283
1277
1284 Yields arbitrarily many (False, tuple) for progress updates, followed by
1278 Yields arbitrarily many (False, tuple) for progress updates, followed by
1285 exactly one (True, filedata). When wantfiledata is false, filedata is an
1279 exactly one (True, filedata). When wantfiledata is false, filedata is an
1286 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1280 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1287 mtime) of the file f written for each action.
1281 mtime) of the file f written for each action.
1288 """
1282 """
1289 filedata = {}
1283 filedata = {}
1290 verbose = repo.ui.verbose
1284 verbose = repo.ui.verbose
1291 fctx = mctx.filectx
1285 fctx = mctx.filectx
1292 ui = repo.ui
1286 ui = repo.ui
1293 i = 0
1287 i = 0
1294 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1288 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1295 for f, (flags, backup), msg in actions:
1289 for f, (flags, backup), msg in actions:
1296 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1290 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1297 if verbose:
1291 if verbose:
1298 repo.ui.note(_(b"getting %s\n") % f)
1292 repo.ui.note(_(b"getting %s\n") % f)
1299
1293
1300 if backup:
1294 if backup:
1301 # If a file or directory exists with the same name, back that
1295 # If a file or directory exists with the same name, back that
1302 # up. Otherwise, look to see if there is a file that conflicts
1296 # up. Otherwise, look to see if there is a file that conflicts
1303 # with a directory this file is in, and if so, back that up.
1297 # with a directory this file is in, and if so, back that up.
1304 conflicting = f
1298 conflicting = f
1305 if not repo.wvfs.lexists(f):
1299 if not repo.wvfs.lexists(f):
1306 for p in pathutil.finddirs(f):
1300 for p in pathutil.finddirs(f):
1307 if repo.wvfs.isfileorlink(p):
1301 if repo.wvfs.isfileorlink(p):
1308 conflicting = p
1302 conflicting = p
1309 break
1303 break
1310 if repo.wvfs.lexists(conflicting):
1304 if repo.wvfs.lexists(conflicting):
1311 orig = scmutil.backuppath(ui, repo, conflicting)
1305 orig = scmutil.backuppath(ui, repo, conflicting)
1312 util.rename(repo.wjoin(conflicting), orig)
1306 util.rename(repo.wjoin(conflicting), orig)
1313 wfctx = wctx[f]
1307 wfctx = wctx[f]
1314 wfctx.clearunknown()
1308 wfctx.clearunknown()
1315 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1309 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1316 size = wfctx.write(
1310 size = wfctx.write(
1317 fctx(f).data(),
1311 fctx(f).data(),
1318 flags,
1312 flags,
1319 backgroundclose=True,
1313 backgroundclose=True,
1320 atomictemp=atomictemp,
1314 atomictemp=atomictemp,
1321 )
1315 )
1322 if wantfiledata:
1316 if wantfiledata:
1323 s = wfctx.lstat()
1317 s = wfctx.lstat()
1324 mode = s.st_mode
1318 mode = s.st_mode
1325 mtime = s[stat.ST_MTIME]
1319 mtime = s[stat.ST_MTIME]
1326 filedata[f] = (mode, size, mtime) # for dirstate.normal
1320 filedata[f] = (mode, size, mtime) # for dirstate.normal
1327 if i == 100:
1321 if i == 100:
1328 yield False, (i, f)
1322 yield False, (i, f)
1329 i = 0
1323 i = 0
1330 i += 1
1324 i += 1
1331 if i > 0:
1325 if i > 0:
1332 yield False, (i, f)
1326 yield False, (i, f)
1333 yield True, filedata
1327 yield True, filedata
1334
1328
1335
1329
1336 def _prefetchfiles(repo, ctx, mresult):
1330 def _prefetchfiles(repo, ctx, mresult):
1337 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1331 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1338 of merge actions. ``ctx`` is the context being merged in."""
1332 of merge actions. ``ctx`` is the context being merged in."""
1339
1333
1340 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1334 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1341 # don't touch the context to be merged in. 'cd' is skipped, because
1335 # don't touch the context to be merged in. 'cd' is skipped, because
1342 # changed/deleted never resolves to something from the remote side.
1336 # changed/deleted never resolves to something from the remote side.
1343 files = mresult.files(
1337 files = mresult.files(
1344 [
1338 [
1345 mergestatemod.ACTION_GET,
1339 mergestatemod.ACTION_GET,
1346 mergestatemod.ACTION_DELETED_CHANGED,
1340 mergestatemod.ACTION_DELETED_CHANGED,
1347 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1341 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1348 mergestatemod.ACTION_MERGE,
1342 mergestatemod.ACTION_MERGE,
1349 ]
1343 ]
1350 )
1344 )
1351
1345
1352 prefetch = scmutil.prefetchfiles
1346 prefetch = scmutil.prefetchfiles
1353 matchfiles = scmutil.matchfiles
1347 matchfiles = scmutil.matchfiles
1354 prefetch(
1348 prefetch(
1355 repo, [(ctx.rev(), matchfiles(repo, files),)],
1349 repo, [(ctx.rev(), matchfiles(repo, files),)],
1356 )
1350 )
1357
1351
1358
1352
1359 @attr.s(frozen=True)
1353 @attr.s(frozen=True)
1360 class updateresult(object):
1354 class updateresult(object):
1361 updatedcount = attr.ib()
1355 updatedcount = attr.ib()
1362 mergedcount = attr.ib()
1356 mergedcount = attr.ib()
1363 removedcount = attr.ib()
1357 removedcount = attr.ib()
1364 unresolvedcount = attr.ib()
1358 unresolvedcount = attr.ib()
1365
1359
1366 def isempty(self):
1360 def isempty(self):
1367 return not (
1361 return not (
1368 self.updatedcount
1362 self.updatedcount
1369 or self.mergedcount
1363 or self.mergedcount
1370 or self.removedcount
1364 or self.removedcount
1371 or self.unresolvedcount
1365 or self.unresolvedcount
1372 )
1366 )
1373
1367
1374
1368
1375 def applyupdates(
1369 def applyupdates(
1376 repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None,
1370 repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None,
1377 ):
1371 ):
1378 """apply the merge action list to the working directory
1372 """apply the merge action list to the working directory
1379
1373
1380 mresult is a mergeresult object representing result of the merge
1374 mresult is a mergeresult object representing result of the merge
1381 wctx is the working copy context
1375 wctx is the working copy context
1382 mctx is the context to be merged into the working copy
1376 mctx is the context to be merged into the working copy
1383
1377
1384 Return a tuple of (counts, filedata), where counts is a tuple
1378 Return a tuple of (counts, filedata), where counts is a tuple
1385 (updated, merged, removed, unresolved) that describes how many
1379 (updated, merged, removed, unresolved) that describes how many
1386 files were affected by the update, and filedata is as described in
1380 files were affected by the update, and filedata is as described in
1387 batchget.
1381 batchget.
1388 """
1382 """
1389
1383
1390 _prefetchfiles(repo, mctx, mresult)
1384 _prefetchfiles(repo, mctx, mresult)
1391
1385
1392 updated, merged, removed = 0, 0, 0
1386 updated, merged, removed = 0, 0, 0
1393 ms = wctx.mergestate(clean=True)
1387 ms = wctx.mergestate(clean=True)
1394 ms.start(wctx.p1().node(), mctx.node(), labels)
1388 ms.start(wctx.p1().node(), mctx.node(), labels)
1395
1389
1396 for f, op in pycompat.iteritems(mresult.commitinfo):
1390 for f, op in pycompat.iteritems(mresult.commitinfo):
1397 # the other side of filenode was chosen while merging, store this in
1391 # the other side of filenode was chosen while merging, store this in
1398 # mergestate so that it can be reused on commit
1392 # mergestate so that it can be reused on commit
1399 ms.addcommitinfo(f, op)
1393 ms.addcommitinfo(f, op)
1400
1394
1401 numupdates = mresult.len() - mresult.len(mergeresult.NO_OP_ACTIONS)
1395 numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
1402 progress = repo.ui.makeprogress(
1396 progress = repo.ui.makeprogress(
1403 _(b'updating'), unit=_(b'files'), total=numupdates
1397 _(b'updating'), unit=_(b'files'), total=numupdates
1404 )
1398 )
1405
1399
1406 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1400 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1407 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1401 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1408
1402
1409 # record path conflicts
1403 # record path conflicts
1410 for f, args, msg in mresult.getactions(
1404 for f, args, msg in mresult.getactions(
1411 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1405 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1412 ):
1406 ):
1413 f1, fo = args
1407 f1, fo = args
1414 s = repo.ui.status
1408 s = repo.ui.status
1415 s(
1409 s(
1416 _(
1410 _(
1417 b"%s: path conflict - a file or link has the same name as a "
1411 b"%s: path conflict - a file or link has the same name as a "
1418 b"directory\n"
1412 b"directory\n"
1419 )
1413 )
1420 % f
1414 % f
1421 )
1415 )
1422 if fo == b'l':
1416 if fo == b'l':
1423 s(_(b"the local file has been renamed to %s\n") % f1)
1417 s(_(b"the local file has been renamed to %s\n") % f1)
1424 else:
1418 else:
1425 s(_(b"the remote file has been renamed to %s\n") % f1)
1419 s(_(b"the remote file has been renamed to %s\n") % f1)
1426 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1420 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1427 ms.addpathconflict(f, f1, fo)
1421 ms.addpathconflict(f, f1, fo)
1428 progress.increment(item=f)
1422 progress.increment(item=f)
1429
1423
1430 # When merging in-memory, we can't support worker processes, so set the
1424 # When merging in-memory, we can't support worker processes, so set the
1431 # per-item cost at 0 in that case.
1425 # per-item cost at 0 in that case.
1432 cost = 0 if wctx.isinmemory() else 0.001
1426 cost = 0 if wctx.isinmemory() else 0.001
1433
1427
1434 # remove in parallel (must come before resolving path conflicts and getting)
1428 # remove in parallel (must come before resolving path conflicts and getting)
1435 prog = worker.worker(
1429 prog = worker.worker(
1436 repo.ui,
1430 repo.ui,
1437 cost,
1431 cost,
1438 batchremove,
1432 batchremove,
1439 (repo, wctx),
1433 (repo, wctx),
1440 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1434 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1441 )
1435 )
1442 for i, item in prog:
1436 for i, item in prog:
1443 progress.increment(step=i, item=item)
1437 progress.increment(step=i, item=item)
1444 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1438 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1445
1439
1446 # resolve path conflicts (must come before getting)
1440 # resolve path conflicts (must come before getting)
1447 for f, args, msg in mresult.getactions(
1441 for f, args, msg in mresult.getactions(
1448 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1442 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1449 ):
1443 ):
1450 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1444 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1451 (f0, origf0) = args
1445 (f0, origf0) = args
1452 if wctx[f0].lexists():
1446 if wctx[f0].lexists():
1453 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1447 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1454 wctx[f].audit()
1448 wctx[f].audit()
1455 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1449 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1456 wctx[f0].remove()
1450 wctx[f0].remove()
1457 progress.increment(item=f)
1451 progress.increment(item=f)
1458
1452
1459 # get in parallel.
1453 # get in parallel.
1460 threadsafe = repo.ui.configbool(
1454 threadsafe = repo.ui.configbool(
1461 b'experimental', b'worker.wdir-get-thread-safe'
1455 b'experimental', b'worker.wdir-get-thread-safe'
1462 )
1456 )
1463 prog = worker.worker(
1457 prog = worker.worker(
1464 repo.ui,
1458 repo.ui,
1465 cost,
1459 cost,
1466 batchget,
1460 batchget,
1467 (repo, mctx, wctx, wantfiledata),
1461 (repo, mctx, wctx, wantfiledata),
1468 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1462 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1469 threadsafe=threadsafe,
1463 threadsafe=threadsafe,
1470 hasretval=True,
1464 hasretval=True,
1471 )
1465 )
1472 getfiledata = {}
1466 getfiledata = {}
1473 for final, res in prog:
1467 for final, res in prog:
1474 if final:
1468 if final:
1475 getfiledata = res
1469 getfiledata = res
1476 else:
1470 else:
1477 i, item = res
1471 i, item = res
1478 progress.increment(step=i, item=item)
1472 progress.increment(step=i, item=item)
1479
1473
1480 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1474 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1481 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1475 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1482
1476
1483 # forget (manifest only, just log it) (must come first)
1477 # forget (manifest only, just log it) (must come first)
1484 for f, args, msg in mresult.getactions(
1478 for f, args, msg in mresult.getactions(
1485 (mergestatemod.ACTION_FORGET,), sort=True
1479 (mergestatemod.ACTION_FORGET,), sort=True
1486 ):
1480 ):
1487 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1481 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1488 progress.increment(item=f)
1482 progress.increment(item=f)
1489
1483
1490 # re-add (manifest only, just log it)
1484 # re-add (manifest only, just log it)
1491 for f, args, msg in mresult.getactions(
1485 for f, args, msg in mresult.getactions(
1492 (mergestatemod.ACTION_ADD,), sort=True
1486 (mergestatemod.ACTION_ADD,), sort=True
1493 ):
1487 ):
1494 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1488 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1495 progress.increment(item=f)
1489 progress.increment(item=f)
1496
1490
1497 # re-add/mark as modified (manifest only, just log it)
1491 # re-add/mark as modified (manifest only, just log it)
1498 for f, args, msg in mresult.getactions(
1492 for f, args, msg in mresult.getactions(
1499 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1493 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1500 ):
1494 ):
1501 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1495 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1502 progress.increment(item=f)
1496 progress.increment(item=f)
1503
1497
1504 # keep (noop, just log it)
1498 # keep (noop, just log it)
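# [editorial note, not part of merge.py] NO_OP_ACTIONS (looked up from
# mergestatemod in the newer of the two lines below) groups the keep-style
# actions that leave the working copy untouched; that is also why they were
# excluded from the numupdates progress total computed earlier.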
1505 for a in mergeresult.NO_OP_ACTIONS:
1499 for a in mergestatemod.NO_OP_ACTIONS:
1506 for f, args, msg in mresult.getactions((a,), sort=True):
1500 for f, args, msg in mresult.getactions((a,), sort=True):
1507 repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a))
1501 repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a))
1508 # no progress
1502 # no progress
1509
1503
1510 # directory rename, move local
1504 # directory rename, move local
1511 for f, args, msg in mresult.getactions(
1505 for f, args, msg in mresult.getactions(
1512 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1506 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1513 ):
1507 ):
1514 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1508 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1515 progress.increment(item=f)
1509 progress.increment(item=f)
1516 f0, flags = args
1510 f0, flags = args
1517 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1511 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1518 wctx[f].audit()
1512 wctx[f].audit()
1519 wctx[f].write(wctx.filectx(f0).data(), flags)
1513 wctx[f].write(wctx.filectx(f0).data(), flags)
1520 wctx[f0].remove()
1514 wctx[f0].remove()
1521
1515
1522 # local directory rename, get
1516 # local directory rename, get
1523 for f, args, msg in mresult.getactions(
1517 for f, args, msg in mresult.getactions(
1524 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1518 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1525 ):
1519 ):
1526 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1520 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1527 progress.increment(item=f)
1521 progress.increment(item=f)
1528 f0, flags = args
1522 f0, flags = args
1529 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1523 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1530 wctx[f].write(mctx.filectx(f0).data(), flags)
1524 wctx[f].write(mctx.filectx(f0).data(), flags)
1531
1525
1532 # exec
1526 # exec
1533 for f, args, msg in mresult.getactions(
1527 for f, args, msg in mresult.getactions(
1534 (mergestatemod.ACTION_EXEC,), sort=True
1528 (mergestatemod.ACTION_EXEC,), sort=True
1535 ):
1529 ):
1536 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1530 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1537 progress.increment(item=f)
1531 progress.increment(item=f)
1538 (flags,) = args
1532 (flags,) = args
1539 wctx[f].audit()
1533 wctx[f].audit()
1540 wctx[f].setflags(b'l' in flags, b'x' in flags)
1534 wctx[f].setflags(b'l' in flags, b'x' in flags)
1541
1535
1542 moves = []
1536 moves = []
1543
1537
1544 # 'cd' and 'dc' actions are treated like other merge conflicts
1538 # 'cd' and 'dc' actions are treated like other merge conflicts
1545 mergeactions = list(
1539 mergeactions = list(
1546 mresult.getactions(
1540 mresult.getactions(
1547 [
1541 [
1548 mergestatemod.ACTION_CHANGED_DELETED,
1542 mergestatemod.ACTION_CHANGED_DELETED,
1549 mergestatemod.ACTION_DELETED_CHANGED,
1543 mergestatemod.ACTION_DELETED_CHANGED,
1550 mergestatemod.ACTION_MERGE,
1544 mergestatemod.ACTION_MERGE,
1551 ],
1545 ],
1552 sort=True,
1546 sort=True,
1553 )
1547 )
1554 )
1548 )
1555 for f, args, msg in mergeactions:
1549 for f, args, msg in mergeactions:
1556 f1, f2, fa, move, anc = args
1550 f1, f2, fa, move, anc = args
1557 if f == b'.hgsubstate': # merged internally
1551 if f == b'.hgsubstate': # merged internally
1558 continue
1552 continue
1559 if f1 is None:
1553 if f1 is None:
1560 fcl = filemerge.absentfilectx(wctx, fa)
1554 fcl = filemerge.absentfilectx(wctx, fa)
1561 else:
1555 else:
1562 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1556 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1563 fcl = wctx[f1]
1557 fcl = wctx[f1]
1564 if f2 is None:
1558 if f2 is None:
1565 fco = filemerge.absentfilectx(mctx, fa)
1559 fco = filemerge.absentfilectx(mctx, fa)
1566 else:
1560 else:
1567 fco = mctx[f2]
1561 fco = mctx[f2]
1568 actx = repo[anc]
1562 actx = repo[anc]
1569 if fa in actx:
1563 if fa in actx:
1570 fca = actx[fa]
1564 fca = actx[fa]
1571 else:
1565 else:
1572 # TODO: move to absentfilectx
1566 # TODO: move to absentfilectx
1573 fca = repo.filectx(f1, fileid=nullrev)
1567 fca = repo.filectx(f1, fileid=nullrev)
1574 ms.add(fcl, fco, fca, f)
1568 ms.add(fcl, fco, fca, f)
1575 if f1 != f and move:
1569 if f1 != f and move:
1576 moves.append(f1)
1570 moves.append(f1)
1577
1571
1578 # remove renamed files after safely stored
1572 # remove renamed files after safely stored
1579 for f in moves:
1573 for f in moves:
1580 if wctx[f].lexists():
1574 if wctx[f].lexists():
1581 repo.ui.debug(b"removing %s\n" % f)
1575 repo.ui.debug(b"removing %s\n" % f)
1582 wctx[f].audit()
1576 wctx[f].audit()
1583 wctx[f].remove()
1577 wctx[f].remove()
1584
1578
1585 # these actions update the file
1579 # these actions update the file
1586 updated = mresult.len(
1580 updated = mresult.len(
1587 (
1581 (
1588 mergestatemod.ACTION_GET,
1582 mergestatemod.ACTION_GET,
1589 mergestatemod.ACTION_EXEC,
1583 mergestatemod.ACTION_EXEC,
1590 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1584 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1591 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1585 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1592 )
1586 )
1593 )
1587 )
1594
1588
1595 try:
1589 try:
1596 # premerge
1590 # premerge
1597 tocomplete = []
1591 tocomplete = []
1598 for f, args, msg in mergeactions:
1592 for f, args, msg in mergeactions:
1599 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1593 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1600 progress.increment(item=f)
1594 progress.increment(item=f)
1601 if f == b'.hgsubstate': # subrepo states need updating
1595 if f == b'.hgsubstate': # subrepo states need updating
1602 subrepoutil.submerge(
1596 subrepoutil.submerge(
1603 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1597 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1604 )
1598 )
1605 continue
1599 continue
1606 wctx[f].audit()
1600 wctx[f].audit()
1607 complete, r = ms.preresolve(f, wctx)
1601 complete, r = ms.preresolve(f, wctx)
1608 if not complete:
1602 if not complete:
1609 numupdates += 1
1603 numupdates += 1
1610 tocomplete.append((f, args, msg))
1604 tocomplete.append((f, args, msg))
1611
1605
1612 # merge
1606 # merge
1613 for f, args, msg in tocomplete:
1607 for f, args, msg in tocomplete:
1614 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1608 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1615 progress.increment(item=f, total=numupdates)
1609 progress.increment(item=f, total=numupdates)
1616 ms.resolve(f, wctx)
1610 ms.resolve(f, wctx)
1617
1611
1618 finally:
1612 finally:
1619 ms.commit()
1613 ms.commit()
1620
1614
1621 unresolved = ms.unresolvedcount()
1615 unresolved = ms.unresolvedcount()
1622
1616
1623 msupdated, msmerged, msremoved = ms.counts()
1617 msupdated, msmerged, msremoved = ms.counts()
1624 updated += msupdated
1618 updated += msupdated
1625 merged += msmerged
1619 merged += msmerged
1626 removed += msremoved
1620 removed += msremoved
1627
1621
1628 extraactions = ms.actions()
1622 extraactions = ms.actions()
1629 if extraactions:
1623 if extraactions:
1630 for k, acts in pycompat.iteritems(extraactions):
1624 for k, acts in pycompat.iteritems(extraactions):
1631 for a in acts:
1625 for a in acts:
1632 mresult.addfile(a[0], k, *a[1:])
1626 mresult.addfile(a[0], k, *a[1:])
1633 if k == mergestatemod.ACTION_GET and wantfiledata:
1627 if k == mergestatemod.ACTION_GET and wantfiledata:
1634 # no filedata until mergestate is updated to provide it
1628 # no filedata until mergestate is updated to provide it
1635 for a in acts:
1629 for a in acts:
1636 getfiledata[a[0]] = None
1630 getfiledata[a[0]] = None
1637
1631
1638 progress.complete()
1632 progress.complete()
1639 assert len(getfiledata) == (
1633 assert len(getfiledata) == (
1640 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1634 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1641 )
1635 )
1642 return updateresult(updated, merged, removed, unresolved), getfiledata
1636 return updateresult(updated, merged, removed, unresolved), getfiledata
1643
1637
1644
1638
1645 def _advertisefsmonitor(repo, num_gets, p1node):
1639 def _advertisefsmonitor(repo, num_gets, p1node):
1646 # Advertise fsmonitor when its presence could be useful.
1640 # Advertise fsmonitor when its presence could be useful.
1647 #
1641 #
1648 # We only advertise when performing an update from an empty working
1642 # We only advertise when performing an update from an empty working
1649 # directory. This typically only occurs during initial clone.
1643 # directory. This typically only occurs during initial clone.
1650 #
1644 #
1651 # We give users a mechanism to disable the warning in case it is
1645 # We give users a mechanism to disable the warning in case it is
1652 # annoying.
1646 # annoying.
1653 #
1647 #
1654 # We only allow on Linux and MacOS because that's where fsmonitor is
1648 # We only allow on Linux and MacOS because that's where fsmonitor is
1655 # considered stable.
1649 # considered stable.
1656 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1650 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1657 fsmonitorthreshold = repo.ui.configint(
1651 fsmonitorthreshold = repo.ui.configint(
1658 b'fsmonitor', b'warn_update_file_count'
1652 b'fsmonitor', b'warn_update_file_count'
1659 )
1653 )
1660 # avoid cycle dirstate -> sparse -> merge -> dirstate
1654 # avoid cycle dirstate -> sparse -> merge -> dirstate
1661 from . import dirstate
1655 from . import dirstate
1662
1656
1663 if dirstate.rustmod is not None:
1657 if dirstate.rustmod is not None:
1664 # When using rust status, fsmonitor becomes necessary at higher sizes
1658 # When using rust status, fsmonitor becomes necessary at higher sizes
1665 fsmonitorthreshold = repo.ui.configint(
1659 fsmonitorthreshold = repo.ui.configint(
1666 b'fsmonitor', b'warn_update_file_count_rust',
1660 b'fsmonitor', b'warn_update_file_count_rust',
1667 )
1661 )
1668
1662
1669 try:
1663 try:
1670 # avoid cycle: extensions -> cmdutil -> merge
1664 # avoid cycle: extensions -> cmdutil -> merge
1671 from . import extensions
1665 from . import extensions
1672
1666
1673 extensions.find(b'fsmonitor')
1667 extensions.find(b'fsmonitor')
1674 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1668 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1675 # We intentionally don't look at whether fsmonitor has disabled
1669 # We intentionally don't look at whether fsmonitor has disabled
1676 # itself because a) fsmonitor may have already printed a warning
1670 # itself because a) fsmonitor may have already printed a warning
1677 # b) we only care about the config state here.
1671 # b) we only care about the config state here.
1678 except KeyError:
1672 except KeyError:
1679 fsmonitorenabled = False
1673 fsmonitorenabled = False
1680
1674
1681 if (
1675 if (
1682 fsmonitorwarning
1676 fsmonitorwarning
1683 and not fsmonitorenabled
1677 and not fsmonitorenabled
1684 and p1node == nullid
1678 and p1node == nullid
1685 and num_gets >= fsmonitorthreshold
1679 and num_gets >= fsmonitorthreshold
1686 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1680 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1687 ):
1681 ):
1688 repo.ui.warn(
1682 repo.ui.warn(
1689 _(
1683 _(
1690 b'(warning: large working directory being used without '
1684 b'(warning: large working directory being used without '
1691 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1685 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1692 b'see "hg help -e fsmonitor")\n'
1686 b'see "hg help -e fsmonitor")\n'
1693 )
1687 )
1694 )
1688 )
1695
1689
1696
1690
1697 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1691 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1698 UPDATECHECK_NONE = b'none'
1692 UPDATECHECK_NONE = b'none'
1699 UPDATECHECK_LINEAR = b'linear'
1693 UPDATECHECK_LINEAR = b'linear'
1700 UPDATECHECK_NO_CONFLICT = b'noconflict'
1694 UPDATECHECK_NO_CONFLICT = b'noconflict'
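# [editorial note, not part of merge.py] These constants mirror the values of
# the experimental.updatecheck configuration referenced in update() below;
# UPDATECHECK_ABORT is resolved by higher layers, so update() itself only
# accepts the other three (see the ValueError it raises otherwise).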
1701
1695
1702
1696
1703 def update(
1697 def update(
1704 repo,
1698 repo,
1705 node,
1699 node,
1706 branchmerge,
1700 branchmerge,
1707 force,
1701 force,
1708 ancestor=None,
1702 ancestor=None,
1709 mergeancestor=False,
1703 mergeancestor=False,
1710 labels=None,
1704 labels=None,
1711 matcher=None,
1705 matcher=None,
1712 mergeforce=False,
1706 mergeforce=False,
1713 updatedirstate=True,
1707 updatedirstate=True,
1714 updatecheck=None,
1708 updatecheck=None,
1715 wc=None,
1709 wc=None,
1716 ):
1710 ):
1717 """
1711 """
1718 Perform a merge between the working directory and the given node
1712 Perform a merge between the working directory and the given node
1719
1713
1720 node = the node to update to
1714 node = the node to update to
1721 branchmerge = whether to merge between branches
1715 branchmerge = whether to merge between branches
1722 force = whether to force branch merging or file overwriting
1716 force = whether to force branch merging or file overwriting
1723 matcher = a matcher to filter file lists (dirstate not updated)
1717 matcher = a matcher to filter file lists (dirstate not updated)
1724 mergeancestor = whether it is merging with an ancestor. If true,
1718 mergeancestor = whether it is merging with an ancestor. If true,
1725 we should accept the incoming changes for any prompts that occur.
1719 we should accept the incoming changes for any prompts that occur.
1726 If false, merging with an ancestor (fast-forward) is only allowed
1720 If false, merging with an ancestor (fast-forward) is only allowed
1727 between different named branches. This flag is used by the rebase extension
1721 between different named branches. This flag is used by the rebase extension
1728 as a temporary fix and should be avoided in general.
1722 as a temporary fix and should be avoided in general.
1729 labels = labels to use for base, local and other
1723 labels = labels to use for base, local and other
1730 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1724 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1731 this is True, then 'force' should be True as well.
1725 this is True, then 'force' should be True as well.
1732
1726
1733 The table below shows all the behaviors of the update command given the
1727 The table below shows all the behaviors of the update command given the
1734 -c/--check and -C/--clean or no options, whether the working directory is
1728 -c/--check and -C/--clean or no options, whether the working directory is
1735 dirty, whether a revision is specified, and the relationship of the parent
1729 dirty, whether a revision is specified, and the relationship of the parent
1736 rev to the target rev (linear or not). Match from top first. The -n
1730 rev to the target rev (linear or not). Match from top first. The -n
1737 option doesn't exist on the command line, but represents the
1731 option doesn't exist on the command line, but represents the
1738 experimental.updatecheck=noconflict option.
1732 experimental.updatecheck=noconflict option.
1739
1733
1740 This logic is tested by test-update-branches.t.
1734 This logic is tested by test-update-branches.t.
1741
1735
1742 -c -C -n -m dirty rev linear | result
1736 -c -C -n -m dirty rev linear | result
1743 y y * * * * * | (1)
1737 y y * * * * * | (1)
1744 y * y * * * * | (1)
1738 y * y * * * * | (1)
1745 y * * y * * * | (1)
1739 y * * y * * * | (1)
1746 * y y * * * * | (1)
1740 * y y * * * * | (1)
1747 * y * y * * * | (1)
1741 * y * y * * * | (1)
1748 * * y y * * * | (1)
1742 * * y y * * * | (1)
1749 * * * * * n n | x
1743 * * * * * n n | x
1750 * * * * n * * | ok
1744 * * * * n * * | ok
1751 n n n n y * y | merge
1745 n n n n y * y | merge
1752 n n n n y y n | (2)
1746 n n n n y y n | (2)
1753 n n n y y * * | merge
1747 n n n y y * * | merge
1754 n n y n y * * | merge if no conflict
1748 n n y n y * * | merge if no conflict
1755 n y n n y * * | discard
1749 n y n n y * * | discard
1756 y n n n y * * | (3)
1750 y n n n y * * | (3)
1757
1751
1758 x = can't happen
1752 x = can't happen
1759 * = don't-care
1753 * = don't-care
1760 1 = incompatible options (checked in commands.py)
1754 1 = incompatible options (checked in commands.py)
1761 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1755 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1762 3 = abort: uncommitted changes (checked in commands.py)
1756 3 = abort: uncommitted changes (checked in commands.py)
1763
1757
1764 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1758 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1765 to repo[None] if None is passed.
1759 to repo[None] if None is passed.
1766
1760
1767 Return the same tuple as applyupdates().
1761 Return the same tuple as applyupdates().
1768 """
1762 """
1769 # Avoid cycle.
1763 # Avoid cycle.
1770 from . import sparse
1764 from . import sparse
1771
1765
1772 # This function used to find the default destination if node was None, but
1766 # This function used to find the default destination if node was None, but
1773 # that's now in destutil.py.
1767 # that's now in destutil.py.
1774 assert node is not None
1768 assert node is not None
1775 if not branchmerge and not force:
1769 if not branchmerge and not force:
1776 # TODO: remove the default once all callers that pass branchmerge=False
1770 # TODO: remove the default once all callers that pass branchmerge=False
1777 # and force=False pass a value for updatecheck. We may want to allow
1771 # and force=False pass a value for updatecheck. We may want to allow
1778 # updatecheck='abort' to better support some of these callers.
1772 # updatecheck='abort' to better support some of these callers.
1779 if updatecheck is None:
1773 if updatecheck is None:
1780 updatecheck = UPDATECHECK_LINEAR
1774 updatecheck = UPDATECHECK_LINEAR
1781 if updatecheck not in (
1775 if updatecheck not in (
1782 UPDATECHECK_NONE,
1776 UPDATECHECK_NONE,
1783 UPDATECHECK_LINEAR,
1777 UPDATECHECK_LINEAR,
1784 UPDATECHECK_NO_CONFLICT,
1778 UPDATECHECK_NO_CONFLICT,
1785 ):
1779 ):
1786 raise ValueError(
1780 raise ValueError(
1787 r'Invalid updatecheck %r (can accept %r)'
1781 r'Invalid updatecheck %r (can accept %r)'
1788 % (
1782 % (
1789 updatecheck,
1783 updatecheck,
1790 (
1784 (
1791 UPDATECHECK_NONE,
1785 UPDATECHECK_NONE,
1792 UPDATECHECK_LINEAR,
1786 UPDATECHECK_LINEAR,
1793 UPDATECHECK_NO_CONFLICT,
1787 UPDATECHECK_NO_CONFLICT,
1794 ),
1788 ),
1795 )
1789 )
1796 )
1790 )
1797 if wc is not None and wc.isinmemory():
1791 if wc is not None and wc.isinmemory():
1798 maybe_wlock = util.nullcontextmanager()
1792 maybe_wlock = util.nullcontextmanager()
1799 else:
1793 else:
1800 maybe_wlock = repo.wlock()
1794 maybe_wlock = repo.wlock()
1801 with maybe_wlock:
1795 with maybe_wlock:
1802 if wc is None:
1796 if wc is None:
1803 wc = repo[None]
1797 wc = repo[None]
1804 pl = wc.parents()
1798 pl = wc.parents()
1805 p1 = pl[0]
1799 p1 = pl[0]
1806 p2 = repo[node]
1800 p2 = repo[node]
1807 if ancestor is not None:
1801 if ancestor is not None:
1808 pas = [repo[ancestor]]
1802 pas = [repo[ancestor]]
1809 else:
1803 else:
1810 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1804 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1811 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1805 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1812 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1806 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1813 else:
1807 else:
1814 pas = [p1.ancestor(p2, warn=branchmerge)]
1808 pas = [p1.ancestor(p2, warn=branchmerge)]
1815
1809
1816 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1810 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1817
1811
1818 overwrite = force and not branchmerge
1812 overwrite = force and not branchmerge
1819 ### check phase
1813 ### check phase
1820 if not overwrite:
1814 if not overwrite:
1821 if len(pl) > 1:
1815 if len(pl) > 1:
1822 raise error.Abort(_(b"outstanding uncommitted merge"))
1816 raise error.Abort(_(b"outstanding uncommitted merge"))
1823 ms = wc.mergestate()
1817 ms = wc.mergestate()
1824 if list(ms.unresolved()):
1818 if list(ms.unresolved()):
1825 raise error.Abort(
1819 raise error.Abort(
1826 _(b"outstanding merge conflicts"),
1820 _(b"outstanding merge conflicts"),
1827 hint=_(b"use 'hg resolve' to resolve"),
1821 hint=_(b"use 'hg resolve' to resolve"),
1828 )
1822 )
1829 if branchmerge:
1823 if branchmerge:
1830 if pas == [p2]:
1824 if pas == [p2]:
1831 raise error.Abort(
1825 raise error.Abort(
1832 _(
1826 _(
1833 b"merging with a working directory ancestor"
1827 b"merging with a working directory ancestor"
1834 b" has no effect"
1828 b" has no effect"
1835 )
1829 )
1836 )
1830 )
1837 elif pas == [p1]:
1831 elif pas == [p1]:
1838 if not mergeancestor and wc.branch() == p2.branch():
1832 if not mergeancestor and wc.branch() == p2.branch():
1839 raise error.Abort(
1833 raise error.Abort(
1840 _(b"nothing to merge"),
1834 _(b"nothing to merge"),
1841 hint=_(b"use 'hg update' or check 'hg heads'"),
1835 hint=_(b"use 'hg update' or check 'hg heads'"),
1842 )
1836 )
1843 if not force and (wc.files() or wc.deleted()):
1837 if not force and (wc.files() or wc.deleted()):
1844 raise error.Abort(
1838 raise error.Abort(
1845 _(b"uncommitted changes"),
1839 _(b"uncommitted changes"),
1846 hint=_(b"use 'hg status' to list changes"),
1840 hint=_(b"use 'hg status' to list changes"),
1847 )
1841 )
1848 if not wc.isinmemory():
1842 if not wc.isinmemory():
1849 for s in sorted(wc.substate):
1843 for s in sorted(wc.substate):
1850 wc.sub(s).bailifchanged()
1844 wc.sub(s).bailifchanged()
1851
1845
1852 elif not overwrite:
1846 elif not overwrite:
1853 if p1 == p2: # no-op update
1847 if p1 == p2: # no-op update
1854 # call the hooks and exit early
1848 # call the hooks and exit early
1855 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1849 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1856 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1850 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1857 return updateresult(0, 0, 0, 0)
1851 return updateresult(0, 0, 0, 0)
1858
1852
1859 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1853 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1860 [p1],
1854 [p1],
1861 [p2],
1855 [p2],
1862 ): # nonlinear
1856 ): # nonlinear
1863 dirty = wc.dirty(missing=True)
1857 dirty = wc.dirty(missing=True)
1864 if dirty:
1858 if dirty:
1865 # Branching is a bit strange to ensure we make the minimal
1859 # Branching is a bit strange to ensure we make the minimal
1866 # number of calls to obsutil.foreground.
1860 # number of calls to obsutil.foreground.
1867 foreground = obsutil.foreground(repo, [p1.node()])
1861 foreground = obsutil.foreground(repo, [p1.node()])
1868 # note: the <node> variable contains a random identifier
1862 # note: the <node> variable contains a random identifier
1869 if repo[node].node() in foreground:
1863 if repo[node].node() in foreground:
1870 pass # allow updating to successors
1864 pass # allow updating to successors
1871 else:
1865 else:
1872 msg = _(b"uncommitted changes")
1866 msg = _(b"uncommitted changes")
1873 hint = _(b"commit or update --clean to discard changes")
1867 hint = _(b"commit or update --clean to discard changes")
1874 raise error.UpdateAbort(msg, hint=hint)
1868 raise error.UpdateAbort(msg, hint=hint)
1875 else:
1869 else:
1876 # Allow jumping branches if clean and specific rev given
1870 # Allow jumping branches if clean and specific rev given
1877 pass
1871 pass
1878
1872
1879 if overwrite:
1873 if overwrite:
1880 pas = [wc]
1874 pas = [wc]
1881 elif not branchmerge:
1875 elif not branchmerge:
1882 pas = [p1]
1876 pas = [p1]
1883
1877
1884 # deprecated config: merge.followcopies
1878 # deprecated config: merge.followcopies
1885 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1879 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1886 if overwrite:
1880 if overwrite:
1887 followcopies = False
1881 followcopies = False
1888 elif not pas[0]:
1882 elif not pas[0]:
1889 followcopies = False
1883 followcopies = False
1890 if not branchmerge and not wc.dirty(missing=True):
1884 if not branchmerge and not wc.dirty(missing=True):
1891 followcopies = False
1885 followcopies = False
1892
1886
1893 ### calculate phase
1887 ### calculate phase
1894 mresult = calculateupdates(
1888 mresult = calculateupdates(
1895 repo,
1889 repo,
1896 wc,
1890 wc,
1897 p2,
1891 p2,
1898 pas,
1892 pas,
1899 branchmerge,
1893 branchmerge,
1900 force,
1894 force,
1901 mergeancestor,
1895 mergeancestor,
1902 followcopies,
1896 followcopies,
1903 matcher=matcher,
1897 matcher=matcher,
1904 mergeforce=mergeforce,
1898 mergeforce=mergeforce,
1905 )
1899 )
1906
1900
1907 if updatecheck == UPDATECHECK_NO_CONFLICT:
1901 if updatecheck == UPDATECHECK_NO_CONFLICT:
1908 if mresult.hasconflicts():
1902 if mresult.hasconflicts():
1909 msg = _(b"conflicting changes")
1903 msg = _(b"conflicting changes")
1910 hint = _(b"commit or update --clean to discard changes")
1904 hint = _(b"commit or update --clean to discard changes")
1911 raise error.Abort(msg, hint=hint)
1905 raise error.Abort(msg, hint=hint)
1912
1906
1913 # Prompt and create actions. Most of this is in the resolve phase
1907 # Prompt and create actions. Most of this is in the resolve phase
1914 # already, but we can't handle .hgsubstate in filemerge or
1908 # already, but we can't handle .hgsubstate in filemerge or
1915 # subrepoutil.submerge yet so we have to keep prompting for it.
1909 # subrepoutil.submerge yet so we have to keep prompting for it.
1916 vals = mresult.getfile(b'.hgsubstate')
1910 vals = mresult.getfile(b'.hgsubstate')
1917 if vals:
1911 if vals:
1918 f = b'.hgsubstate'
1912 f = b'.hgsubstate'
1919 m, args, msg = vals
1913 m, args, msg = vals
1920 prompts = filemerge.partextras(labels)
1914 prompts = filemerge.partextras(labels)
1921 prompts[b'f'] = f
1915 prompts[b'f'] = f
1922 if m == mergestatemod.ACTION_CHANGED_DELETED:
1916 if m == mergestatemod.ACTION_CHANGED_DELETED:
1923 if repo.ui.promptchoice(
1917 if repo.ui.promptchoice(
1924 _(
1918 _(
1925 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1919 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1926 b"use (c)hanged version or (d)elete?"
1920 b"use (c)hanged version or (d)elete?"
1927 b"$$ &Changed $$ &Delete"
1921 b"$$ &Changed $$ &Delete"
1928 )
1922 )
1929 % prompts,
1923 % prompts,
1930 0,
1924 0,
1931 ):
1925 ):
1932 mresult.addfile(
1926 mresult.addfile(
1933 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1927 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1934 )
1928 )
1935 elif f in p1:
1929 elif f in p1:
1936 mresult.addfile(
1930 mresult.addfile(
1937 f,
1931 f,
1938 mergestatemod.ACTION_ADD_MODIFIED,
1932 mergestatemod.ACTION_ADD_MODIFIED,
1939 None,
1933 None,
1940 b'prompt keep',
1934 b'prompt keep',
1941 )
1935 )
1942 else:
1936 else:
1943 mresult.addfile(
1937 mresult.addfile(
1944 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1938 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1945 )
1939 )
1946 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1940 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1947 f1, f2, fa, move, anc = args
1941 f1, f2, fa, move, anc = args
1948 flags = p2[f2].flags()
1942 flags = p2[f2].flags()
1949 if (
1943 if (
1950 repo.ui.promptchoice(
1944 repo.ui.promptchoice(
1951 _(
1945 _(
1952 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1946 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1953 b"use (c)hanged version or leave (d)eleted?"
1947 b"use (c)hanged version or leave (d)eleted?"
1954 b"$$ &Changed $$ &Deleted"
1948 b"$$ &Changed $$ &Deleted"
1955 )
1949 )
1956 % prompts,
1950 % prompts,
1957 0,
1951 0,
1958 )
1952 )
1959 == 0
1953 == 0
1960 ):
1954 ):
1961 mresult.addfile(
1955 mresult.addfile(
1962 f,
1956 f,
1963 mergestatemod.ACTION_GET,
1957 mergestatemod.ACTION_GET,
1964 (flags, False),
1958 (flags, False),
1965 b'prompt recreating',
1959 b'prompt recreating',
1966 )
1960 )
1967 else:
1961 else:
1968 mresult.removefile(f)
1962 mresult.removefile(f)
1969
1963
1970 if not util.fscasesensitive(repo.path):
1964 if not util.fscasesensitive(repo.path):
1971 # check collision between files only in p2 for clean update
1965 # check collision between files only in p2 for clean update
1972 if not branchmerge and (
1966 if not branchmerge and (
1973 force or not wc.dirty(missing=True, branch=False)
1967 force or not wc.dirty(missing=True, branch=False)
1974 ):
1968 ):
1975 _checkcollision(repo, p2.manifest(), None)
1969 _checkcollision(repo, p2.manifest(), None)
1976 else:
1970 else:
1977 _checkcollision(repo, wc.manifest(), mresult)
1971 _checkcollision(repo, wc.manifest(), mresult)
1978
1972
1979 # divergent renames
1973 # divergent renames
1980 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
1974 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
1981 repo.ui.warn(
1975 repo.ui.warn(
1982 _(
1976 _(
1983 b"note: possible conflict - %s was renamed "
1977 b"note: possible conflict - %s was renamed "
1984 b"multiple times to:\n"
1978 b"multiple times to:\n"
1985 )
1979 )
1986 % f
1980 % f
1987 )
1981 )
1988 for nf in sorted(fl):
1982 for nf in sorted(fl):
1989 repo.ui.warn(b" %s\n" % nf)
1983 repo.ui.warn(b" %s\n" % nf)
1990
1984
1991 # rename and delete
1985 # rename and delete
1992 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
1986 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
1993 repo.ui.warn(
1987 repo.ui.warn(
1994 _(
1988 _(
1995 b"note: possible conflict - %s was deleted "
1989 b"note: possible conflict - %s was deleted "
1996 b"and renamed to:\n"
1990 b"and renamed to:\n"
1997 )
1991 )
1998 % f
1992 % f
1999 )
1993 )
2000 for nf in sorted(fl):
1994 for nf in sorted(fl):
2001 repo.ui.warn(b" %s\n" % nf)
1995 repo.ui.warn(b" %s\n" % nf)
2002
1996
2003 ### apply phase
1997 ### apply phase
2004 if not branchmerge: # just jump to the new rev
1998 if not branchmerge: # just jump to the new rev
2005 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
1999 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2006 # If we're doing a partial update, we need to skip updating
2000 # If we're doing a partial update, we need to skip updating
2007 # the dirstate.
2001 # the dirstate.
2008 always = matcher is None or matcher.always()
2002 always = matcher is None or matcher.always()
2009 updatedirstate = updatedirstate and always and not wc.isinmemory()
2003 updatedirstate = updatedirstate and always and not wc.isinmemory()
2010 if updatedirstate:
2004 if updatedirstate:
2011 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2005 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2012 # note that we're in the middle of an update
2006 # note that we're in the middle of an update
2013 repo.vfs.write(b'updatestate', p2.hex())
2007 repo.vfs.write(b'updatestate', p2.hex())
2014
2008
2015 _advertisefsmonitor(
2009 _advertisefsmonitor(
2016 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2010 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2017 )
2011 )
2018
2012
2019 wantfiledata = updatedirstate and not branchmerge
2013 wantfiledata = updatedirstate and not branchmerge
2020 stats, getfiledata = applyupdates(
2014 stats, getfiledata = applyupdates(
2021 repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels,
2015 repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels,
2022 )
2016 )
2023
2017
2024 if updatedirstate:
2018 if updatedirstate:
2025 with repo.dirstate.parentchange():
2019 with repo.dirstate.parentchange():
2026 repo.setparents(fp1, fp2)
2020 repo.setparents(fp1, fp2)
2027 mergestatemod.recordupdates(
2021 mergestatemod.recordupdates(
2028 repo, mresult.actionsdict, branchmerge, getfiledata
2022 repo, mresult.actionsdict, branchmerge, getfiledata
2029 )
2023 )
2030 # update completed, clear state
2024 # update completed, clear state
2031 util.unlink(repo.vfs.join(b'updatestate'))
2025 util.unlink(repo.vfs.join(b'updatestate'))
2032
2026
2033 if not branchmerge:
2027 if not branchmerge:
2034 repo.dirstate.setbranch(p2.branch())
2028 repo.dirstate.setbranch(p2.branch())
2035
2029
2036 # If we're updating to a location, clean up any stale temporary includes
2030 # If we're updating to a location, clean up any stale temporary includes
2037 # (ex: this happens during hg rebase --abort).
2031 # (ex: this happens during hg rebase --abort).
2038 if not branchmerge:
2032 if not branchmerge:
2039 sparse.prunetemporaryincludes(repo)
2033 sparse.prunetemporaryincludes(repo)
2040
2034
2041 if updatedirstate:
2035 if updatedirstate:
2042 repo.hook(
2036 repo.hook(
2043 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2037 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2044 )
2038 )
2045 return stats
2039 return stats
2046
2040
2047
2041
2048 def merge(ctx, labels=None, force=False, wc=None):
2042 def merge(ctx, labels=None, force=False, wc=None):
2049 """Merge another topological branch into the working copy.
2043 """Merge another topological branch into the working copy.
2050
2044
2051 force = whether the merge was run with 'merge --force' (deprecated)
2045 force = whether the merge was run with 'merge --force' (deprecated)
2052 """
2046 """
2053
2047
2054 return update(
2048 return update(
2055 ctx.repo(),
2049 ctx.repo(),
2056 ctx.rev(),
2050 ctx.rev(),
2057 labels=labels,
2051 labels=labels,
2058 branchmerge=True,
2052 branchmerge=True,
2059 force=force,
2053 force=force,
2060 mergeforce=force,
2054 mergeforce=force,
2061 wc=wc,
2055 wc=wc,
2062 )
2056 )
2063
2057
2064
2058
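# A minimal usage sketch, not part of the original file: calling the wrapper
# above from an extension or script. It assumes an open localrepository
# `repo` and a revision identifier `rev`; the label strings are arbitrary.
#
#     ctx = repo[rev]
#     stats = merge(ctx, labels=[b'working copy', b'merge rev'])
#     if stats.unresolvedcount:
#         pass  # conflicts remain; the caller defers to 'hg resolve'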
2065 def clean_update(ctx, wc=None):
2059 def clean_update(ctx, wc=None):
2066 """Do a clean update to the given commit.
2060 """Do a clean update to the given commit.
2067
2061
2068 This involves updating to the commit and discarding any changes in the
2062 This involves updating to the commit and discarding any changes in the
2069 working copy.
2063 working copy.
2070 """
2064 """
2071 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2065 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2072
2066
2073
2067
2074 def revert_to(ctx, matcher=None, wc=None):
2068 def revert_to(ctx, matcher=None, wc=None):
2075 """Revert the working copy to the given commit.
2069 """Revert the working copy to the given commit.
2076
2070
2077 The working copy will keep its current parent(s) but its content will
2071 The working copy will keep its current parent(s) but its content will
2078 be the same as in the given commit.
2072 be the same as in the given commit.
2079 """
2073 """
2080
2074
2081 return update(
2075 return update(
2082 ctx.repo(),
2076 ctx.repo(),
2083 ctx.rev(),
2077 ctx.rev(),
2084 branchmerge=False,
2078 branchmerge=False,
2085 force=True,
2079 force=True,
2086 updatedirstate=False,
2080 updatedirstate=False,
2087 matcher=matcher,
2081 matcher=matcher,
2088 wc=wc,
2082 wc=wc,
2089 )
2083 )
2090
2084
2091
2085
2092 def graft(
2086 def graft(
2093 repo,
2087 repo,
2094 ctx,
2088 ctx,
2095 base=None,
2089 base=None,
2096 labels=None,
2090 labels=None,
2097 keepparent=False,
2091 keepparent=False,
2098 keepconflictparent=False,
2092 keepconflictparent=False,
2099 wctx=None,
2093 wctx=None,
2100 ):
2094 ):
2101 """Do a graft-like merge.
2095 """Do a graft-like merge.
2102
2096
2103 This is a merge where the merge ancestor is chosen such that one
2097 This is a merge where the merge ancestor is chosen such that one
2104 or more changesets are grafted onto the current changeset. In
2098 or more changesets are grafted onto the current changeset. In
2105 addition to the merge, this fixes up the dirstate to include only
2099 addition to the merge, this fixes up the dirstate to include only
2106 a single parent (if keepparent is False) and tries to duplicate any
2100 a single parent (if keepparent is False) and tries to duplicate any
2107 renames/copies appropriately.
2101 renames/copies appropriately.
2108
2102
2109 ctx - changeset to rebase
2103 ctx - changeset to rebase
2110 base - merge base, or ctx.p1() if not specified
2104 base - merge base, or ctx.p1() if not specified
2111 labels - merge labels eg ['local', 'graft']
2105 labels - merge labels eg ['local', 'graft']
2112 keepparent - keep second parent if any
2106 keepparent - keep second parent if any
2113 keepconflictparent - if unresolved, keep parent used for the merge
2107 keepconflictparent - if unresolved, keep parent used for the merge
2114
2108
2115 """
2109 """
2116 # If we're grafting a descendant onto an ancestor, be sure to pass
2110 # If we're grafting a descendant onto an ancestor, be sure to pass
2117 # mergeancestor=True to update. This does two things: 1) allows the merge if
2111 # mergeancestor=True to update. This does two things: 1) allows the merge if
2118 # the destination is the same as the parent of the ctx (so we can use graft
2112 # the destination is the same as the parent of the ctx (so we can use graft
2119 # to copy commits), and 2) informs update that the incoming changes are
2113 # to copy commits), and 2) informs update that the incoming changes are
2120 # newer than the destination so it doesn't prompt about "remote changed foo
2114 # newer than the destination so it doesn't prompt about "remote changed foo
2121 # which local deleted".
2115 # which local deleted".
2122 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2116 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2123 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2117 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2124 wctx = wctx or repo[None]
2118 wctx = wctx or repo[None]
2125 pctx = wctx.p1()
2119 pctx = wctx.p1()
2126 base = base or ctx.p1()
2120 base = base or ctx.p1()
2127 mergeancestor = (
2121 mergeancestor = (
2128 repo.changelog.isancestor(pctx.node(), ctx.node())
2122 repo.changelog.isancestor(pctx.node(), ctx.node())
2129 or pctx.rev() == base.rev()
2123 or pctx.rev() == base.rev()
2130 )
2124 )
2131
2125
2132 stats = update(
2126 stats = update(
2133 repo,
2127 repo,
2134 ctx.node(),
2128 ctx.node(),
2135 True,
2129 True,
2136 True,
2130 True,
2137 base.node(),
2131 base.node(),
2138 mergeancestor=mergeancestor,
2132 mergeancestor=mergeancestor,
2139 labels=labels,
2133 labels=labels,
2140 wc=wctx,
2134 wc=wctx,
2141 )
2135 )
2142
2136
2143 if keepconflictparent and stats.unresolvedcount:
2137 if keepconflictparent and stats.unresolvedcount:
2144 pother = ctx.node()
2138 pother = ctx.node()
2145 else:
2139 else:
2146 pother = nullid
2140 pother = nullid
2147 parents = ctx.parents()
2141 parents = ctx.parents()
2148 if keepparent and len(parents) == 2 and base in parents:
2142 if keepparent and len(parents) == 2 and base in parents:
2149 parents.remove(base)
2143 parents.remove(base)
2150 pother = parents[0].node()
2144 pother = parents[0].node()
2151 # Never set both parents equal to each other
2145 # Never set both parents equal to each other
2152 if pother == pctx.node():
2146 if pother == pctx.node():
2153 pother = nullid
2147 pother = nullid
2154
2148
2155 if wctx.isinmemory():
2149 if wctx.isinmemory():
2156 wctx.setparents(pctx.node(), pother)
2150 wctx.setparents(pctx.node(), pother)
2157 # fix up dirstate for copies and renames
2151 # fix up dirstate for copies and renames
2158 copies.graftcopies(wctx, ctx, base)
2152 copies.graftcopies(wctx, ctx, base)
2159 else:
2153 else:
2160 with repo.dirstate.parentchange():
2154 with repo.dirstate.parentchange():
2161 repo.setparents(pctx.node(), pother)
2155 repo.setparents(pctx.node(), pother)
2162 repo.dirstate.write(repo.currenttransaction())
2156 repo.dirstate.write(repo.currenttransaction())
2163 # fix up dirstate for copies and renames
2157 # fix up dirstate for copies and renames
2164 copies.graftcopies(wctx, ctx, base)
2158 copies.graftcopies(wctx, ctx, base)
2165 return stats
2159 return stats
2166
2160
2167
2161
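# A hedged sketch, not part of the original file: grafting one changeset onto
# the current working-copy parent, roughly one step of 'hg graft'. `repo` and
# `rev` are assumed to exist; the labels follow the docstring's example.
#
#     ctx = repo[rev]
#     stats = graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])
#     if stats.unresolvedcount:
#         pass  # caller saves state and asks the user to resolve conflicts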
2168 def purge(
2162 def purge(
2169 repo,
2163 repo,
2170 matcher,
2164 matcher,
2171 unknown=True,
2165 unknown=True,
2172 ignored=False,
2166 ignored=False,
2173 removeemptydirs=True,
2167 removeemptydirs=True,
2174 removefiles=True,
2168 removefiles=True,
2175 abortonerror=False,
2169 abortonerror=False,
2176 noop=False,
2170 noop=False,
2177 ):
2171 ):
2178 """Purge the working directory of untracked files.
2172 """Purge the working directory of untracked files.
2179
2173
2180 ``matcher`` is a matcher configured to scan the working directory -
2174 ``matcher`` is a matcher configured to scan the working directory -
2181 potentially a subset.
2175 potentially a subset.
2182
2176
2183 ``unknown`` controls whether unknown files should be purged.
2177 ``unknown`` controls whether unknown files should be purged.
2184
2178
2185 ``ignored`` controls whether ignored files should be purged.
2179 ``ignored`` controls whether ignored files should be purged.
2186
2180
2187 ``removeemptydirs`` controls whether empty directories should be removed.
2181 ``removeemptydirs`` controls whether empty directories should be removed.
2188
2182
2189 ``removefiles`` controls whether files are removed.
2183 ``removefiles`` controls whether files are removed.
2190
2184
2191 ``abortonerror`` causes an exception to be raised if an error occurs
2185 ``abortonerror`` causes an exception to be raised if an error occurs
2192 deleting a file or directory.
2186 deleting a file or directory.
2193
2187
2194 ``noop`` controls whether files are actually removed. When it is False (the
2188 ``noop`` controls whether files are actually removed. When it is False (the
2195 default), removal actions are performed.
2189 default), removal actions are performed.
2196
2190
2197 Returns an iterable of relative paths in the working directory that were
2191 Returns an iterable of relative paths in the working directory that were
2198 or would be removed.
2192 or would be removed.
2199 """
2193 """
2200
2194
2201 def remove(removefn, path):
2195 def remove(removefn, path):
2202 try:
2196 try:
2203 removefn(path)
2197 removefn(path)
2204 except OSError:
2198 except OSError:
2205 m = _(b'%s cannot be removed') % path
2199 m = _(b'%s cannot be removed') % path
2206 if abortonerror:
2200 if abortonerror:
2207 raise error.Abort(m)
2201 raise error.Abort(m)
2208 else:
2202 else:
2209 repo.ui.warn(_(b'warning: %s\n') % m)
2203 repo.ui.warn(_(b'warning: %s\n') % m)
2210
2204
2211 # There's no API to copy a matcher. So mutate the passed matcher and
2205 # There's no API to copy a matcher. So mutate the passed matcher and
2212 # restore it when we're done.
2206 # restore it when we're done.
2213 oldtraversedir = matcher.traversedir
2207 oldtraversedir = matcher.traversedir
2214
2208
2215 res = []
2209 res = []
2216
2210
2217 try:
2211 try:
2218 if removeemptydirs:
2212 if removeemptydirs:
2219 directories = []
2213 directories = []
2220 matcher.traversedir = directories.append
2214 matcher.traversedir = directories.append
2221
2215
2222 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2216 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2223
2217
2224 if removefiles:
2218 if removefiles:
2225 for f in sorted(status.unknown + status.ignored):
2219 for f in sorted(status.unknown + status.ignored):
2226 if not noop:
2220 if not noop:
2227 repo.ui.note(_(b'removing file %s\n') % f)
2221 repo.ui.note(_(b'removing file %s\n') % f)
2228 remove(repo.wvfs.unlink, f)
2222 remove(repo.wvfs.unlink, f)
2229 res.append(f)
2223 res.append(f)
2230
2224
2231 if removeemptydirs:
2225 if removeemptydirs:
2232 for f in sorted(directories, reverse=True):
2226 for f in sorted(directories, reverse=True):
2233 if matcher(f) and not repo.wvfs.listdir(f):
2227 if matcher(f) and not repo.wvfs.listdir(f):
2234 if not noop:
2228 if not noop:
2235 repo.ui.note(_(b'removing directory %s\n') % f)
2229 repo.ui.note(_(b'removing directory %s\n') % f)
2236 remove(repo.wvfs.rmdir, f)
2230 remove(repo.wvfs.rmdir, f)
2237 res.append(f)
2231 res.append(f)
2238
2232
2239 return res
2233 return res
2240
2234
2241 finally:
2235 finally:
2242 matcher.traversedir = oldtraversedir
2236 matcher.traversedir = oldtraversedir
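# A hedged usage sketch, not part of the original file: a dry run of purge()
# that only reports what would be removed. It assumes an open repository
# `repo` and uses scmutil.matchall() to scan the whole working directory.
#
#     from mercurial import scmutil
#     m = scmutil.matchall(repo)
#     for path in purge(repo, m, unknown=True, ignored=False, noop=True):
#         repo.ui.write(b'%s\n' % path)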
@@ -1,828 +1,835 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import collections
3 import collections
4 import errno
4 import errno
5 import shutil
5 import shutil
6 import struct
6 import struct
7
7
8 from .i18n import _
8 from .i18n import _
9 from .node import (
9 from .node import (
10 bin,
10 bin,
11 hex,
11 hex,
12 nullhex,
12 nullhex,
13 nullid,
13 nullid,
14 )
14 )
15 from . import (
15 from . import (
16 error,
16 error,
17 filemerge,
17 filemerge,
18 pycompat,
18 pycompat,
19 util,
19 util,
20 )
20 )
21 from .utils import hashutil
21 from .utils import hashutil
22
22
23 _pack = struct.pack
23 _pack = struct.pack
24 _unpack = struct.unpack
24 _unpack = struct.unpack
25
25
26
26
27 def _droponode(data):
27 def _droponode(data):
28 # used for compatibility for v1
28 # used for compatibility for v1
29 bits = data.split(b'\0')
29 bits = data.split(b'\0')
30 bits = bits[:-2] + bits[-1:]
30 bits = bits[:-2] + bits[-1:]
31 return b'\0'.join(bits)
31 return b'\0'.join(bits)
32
32
33
33
34 def _filectxorabsent(hexnode, ctx, f):
34 def _filectxorabsent(hexnode, ctx, f):
35 if hexnode == nullhex:
35 if hexnode == nullhex:
36 return filemerge.absentfilectx(ctx, f)
36 return filemerge.absentfilectx(ctx, f)
37 else:
37 else:
38 return ctx[f]
38 return ctx[f]
39
39
40
40
41 # Merge state record types. See ``mergestate`` docs for more.
41 # Merge state record types. See ``mergestate`` docs for more.
42
42
43 #####
43 #####
44 # merge records which record metadata about a current merge
44 # merge records which record metadata about a current merge
45 # each exists only once in a mergestate
45 # each exists only once in a mergestate
46 #####
46 #####
47 RECORD_LOCAL = b'L'
47 RECORD_LOCAL = b'L'
48 RECORD_OTHER = b'O'
48 RECORD_OTHER = b'O'
49 # record merge labels
49 # record merge labels
50 RECORD_LABELS = b'l'
50 RECORD_LABELS = b'l'
51
51
52 #####
52 #####
53 # record extra information about files, with one entry containing info about one
53 # record extra information about files, with one entry containing info about one
54 # file. Hence, multiple of them can exist
54 # file. Hence, multiple of them can exist
55 #####
55 #####
56 RECORD_FILE_VALUES = b'f'
56 RECORD_FILE_VALUES = b'f'
57
57
58 #####
58 #####
59 # merge records which represent the state of individual merges of files/folders
59 # merge records which represent the state of individual merges of files/folders
60 # These are top level records for each entry containing merge related info.
60 # These are top level records for each entry containing merge related info.
61 # Each record of these has info about one file. Hence multiple of them can
61 # Each record of these has info about one file. Hence multiple of them can
62 # exist
62 # exist
63 #####
63 #####
64 RECORD_MERGED = b'F'
64 RECORD_MERGED = b'F'
65 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 RECORD_CHANGEDELETE_CONFLICT = b'C'
66 # the path was dir on one side of merge and file on another
66 # the path was dir on one side of merge and file on another
67 RECORD_PATH_CONFLICT = b'P'
67 RECORD_PATH_CONFLICT = b'P'
68
68
69 #####
69 #####
70 # possible states which a merge entry can have. These are stored inside top-level
70 # possible states which a merge entry can have. These are stored inside top-level
71 # merge records mentioned just above.
71 # merge records mentioned just above.
72 #####
72 #####
73 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_UNRESOLVED = b'u'
74 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_RESOLVED = b'r'
75 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
76 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 MERGE_RECORD_RESOLVED_PATH = b'pr'
77 # represents that the file was automatically merged in favor
77 # represents that the file was automatically merged in favor
78 # of other version. This info is used on commit.
78 # of other version. This info is used on commit.
79 # This is now deprecated and commit related information is now
79 # This is now deprecated and commit related information is now
80 # stored in RECORD_FILE_VALUES
80 # stored in RECORD_FILE_VALUES
81 MERGE_RECORD_MERGED_OTHER = b'o'
81 MERGE_RECORD_MERGED_OTHER = b'o'
82
82
83 #####
83 #####
84 # top level record which stores other unknown records. Multiple of these can
84 # top level record which stores other unknown records. Multiple of these can
85 # exist
85 # exist
86 #####
86 #####
87 RECORD_OVERRIDE = b't'
87 RECORD_OVERRIDE = b't'
88
88
89 #####
89 #####
90 # legacy records which are no longer used but kept to prevent breaking BC
90 # legacy records which are no longer used but kept to prevent breaking BC
91 #####
91 #####
92 # This record was released in 5.4 and usage was removed in 5.5
92 # This record was released in 5.4 and usage was removed in 5.5
93 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 LEGACY_RECORD_RESOLVED_OTHER = b'R'
94 # This record was released in 3.7 and usage was removed in 5.6
94 # This record was released in 3.7 and usage was removed in 5.6
95 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
96 # This record was released in 3.7 and usage was removed in 5.6
96 # This record was released in 3.7 and usage was removed in 5.6
97 LEGACY_MERGE_DRIVER_STATE = b'm'
97 LEGACY_MERGE_DRIVER_STATE = b'm'
98 # This record was released in 3.7 and usage was removed in 5.6
98 # This record was released in 3.7 and usage was removed in 5.6
99 LEGACY_MERGE_DRIVER_MERGE = b'D'
99 LEGACY_MERGE_DRIVER_MERGE = b'D'
100
100
101
101
102 ACTION_FORGET = b'f'
102 ACTION_FORGET = b'f'
103 ACTION_REMOVE = b'r'
103 ACTION_REMOVE = b'r'
104 ACTION_ADD = b'a'
104 ACTION_ADD = b'a'
105 ACTION_GET = b'g'
105 ACTION_GET = b'g'
106 ACTION_PATH_CONFLICT = b'p'
106 ACTION_PATH_CONFLICT = b'p'
107 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
108 ACTION_ADD_MODIFIED = b'am'
108 ACTION_ADD_MODIFIED = b'am'
109 ACTION_CREATED = b'c'
109 ACTION_CREATED = b'c'
110 ACTION_DELETED_CHANGED = b'dc'
110 ACTION_DELETED_CHANGED = b'dc'
111 ACTION_CHANGED_DELETED = b'cd'
111 ACTION_CHANGED_DELETED = b'cd'
112 ACTION_MERGE = b'm'
112 ACTION_MERGE = b'm'
113 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
114 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
115 ACTION_KEEP = b'k'
115 ACTION_KEEP = b'k'
116 # the file was absent on local side before merge and we should
116 # the file was absent on local side before merge and we should
117 # keep it absent (absent means file not present, it can be a result
117 # keep it absent (absent means file not present, it can be a result
118 # of file deletion, rename etc.)
118 # of file deletion, rename etc.)
119 ACTION_KEEP_ABSENT = b'ka'
119 ACTION_KEEP_ABSENT = b'ka'
120 # the file is absent on the ancestor and remote side of the merge
120 # the file is absent on the ancestor and remote side of the merge
121 # hence this file is new and we should keep it
121 # hence this file is new and we should keep it
122 ACTION_KEEP_NEW = b'kn'
122 ACTION_KEEP_NEW = b'kn'
123 ACTION_EXEC = b'e'
123 ACTION_EXEC = b'e'
124 ACTION_CREATED_MERGE = b'cm'
124 ACTION_CREATED_MERGE = b'cm'
125
125
126 # actions which are no op
127 NO_OP_ACTIONS = (
128 ACTION_KEEP,
129 ACTION_KEEP_ABSENT,
130 ACTION_KEEP_NEW,
131 )
132
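# Hypothetical caller, not part of this change: with the tuple defined at
# module scope, code outside mergeresult can test an action directly, e.g.
#
#     if action in NO_OP_ACTIONS:
#         pass  # nothing needs to be done in the working directory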
126
133
127 class _mergestate_base(object):
134 class _mergestate_base(object):
128 '''track 3-way merge state of individual files
135 '''track 3-way merge state of individual files
129
136
130 The merge state is stored on disk when needed. Two files are used: one with
137 The merge state is stored on disk when needed. Two files are used: one with
131 an old format (version 1), and one with a new format (version 2). Version 2
138 an old format (version 1), and one with a new format (version 2). Version 2
132 stores a superset of the data in version 1, including new kinds of records
139 stores a superset of the data in version 1, including new kinds of records
133 in the future. For more about the new format, see the documentation for
140 in the future. For more about the new format, see the documentation for
134 `_readrecordsv2`.
141 `_readrecordsv2`.
135
142
136 Each record can contain arbitrary content, and has an associated type. This
143 Each record can contain arbitrary content, and has an associated type. This
137 `type` should be a letter. If `type` is uppercase, the record is mandatory:
144 `type` should be a letter. If `type` is uppercase, the record is mandatory:
138 versions of Mercurial that don't support it should abort. If `type` is
145 versions of Mercurial that don't support it should abort. If `type` is
139 lowercase, the record can be safely ignored.
146 lowercase, the record can be safely ignored.
140
147
141 Currently known records:
148 Currently known records:
142
149
143 L: the node of the "local" part of the merge (hexified version)
150 L: the node of the "local" part of the merge (hexified version)
144 O: the node of the "other" part of the merge (hexified version)
151 O: the node of the "other" part of the merge (hexified version)
145 F: a file to be merged entry
152 F: a file to be merged entry
146 C: a change/delete or delete/change conflict
153 C: a change/delete or delete/change conflict
147 P: a path conflict (file vs directory)
154 P: a path conflict (file vs directory)
148 f: a (filename, dictionary) tuple of optional values for a given file
155 f: a (filename, dictionary) tuple of optional values for a given file
149 l: the labels for the parts of the merge.
156 l: the labels for the parts of the merge.
150
157
151 Merge record states (stored in self._state, indexed by filename):
158 Merge record states (stored in self._state, indexed by filename):
152 u: unresolved conflict
159 u: unresolved conflict
153 r: resolved conflict
160 r: resolved conflict
154 pu: unresolved path conflict (file conflicts with directory)
161 pu: unresolved path conflict (file conflicts with directory)
155 pr: resolved path conflict
162 pr: resolved path conflict
156
163
157 The resolve command transitions between 'u' and 'r' for conflicts and
164 The resolve command transitions between 'u' and 'r' for conflicts and
158 'pu' and 'pr' for path conflicts.
165 'pu' and 'pr' for path conflicts.
159 '''
166 '''
160
167
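# Illustrative only, not in the original source: after add() (below) has run
# for one conflicted path, self._state holds an entry shaped like this; the
# filename and hex nodes are made up.
#
#     self._state = {
#         b'foo.txt': [MERGE_RECORD_UNRESOLVED, localkey, b'foo.txt',
#                      b'foo.txt', ancestor_filenode_hex, b'foo.txt',
#                      other_filenode_hex, flags],
#     }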
161 def __init__(self, repo):
168 def __init__(self, repo):
162 """Initialize the merge state.
169 """Initialize the merge state.
163
170
164 Do not use this directly! Instead call read() or clean()."""
171 Do not use this directly! Instead call read() or clean()."""
165 self._repo = repo
172 self._repo = repo
166 self._state = {}
173 self._state = {}
167 self._stateextras = collections.defaultdict(dict)
174 self._stateextras = collections.defaultdict(dict)
168 self._local = None
175 self._local = None
169 self._other = None
176 self._other = None
170 self._labels = None
177 self._labels = None
171 # contains a mapping of form:
178 # contains a mapping of form:
172 # {filename : (merge_return_value, action_to_be_performed}
179 # {filename : (merge_return_value, action_to_be_performed}
173 # these are results of re-running merge process
180 # these are results of re-running merge process
174 # this dict is used to perform actions on dirstate caused by re-running
181 # this dict is used to perform actions on dirstate caused by re-running
175 # the merge
182 # the merge
176 self._results = {}
183 self._results = {}
177 self._dirty = False
184 self._dirty = False
178
185
179 def reset(self):
186 def reset(self):
180 pass
187 pass
181
188
182 def start(self, node, other, labels=None):
189 def start(self, node, other, labels=None):
183 self._local = node
190 self._local = node
184 self._other = other
191 self._other = other
185 self._labels = labels
192 self._labels = labels
186
193
187 @util.propertycache
194 @util.propertycache
188 def local(self):
195 def local(self):
189 if self._local is None:
196 if self._local is None:
190 msg = b"local accessed but self._local isn't set"
197 msg = b"local accessed but self._local isn't set"
191 raise error.ProgrammingError(msg)
198 raise error.ProgrammingError(msg)
192 return self._local
199 return self._local
193
200
194 @util.propertycache
201 @util.propertycache
195 def localctx(self):
202 def localctx(self):
196 return self._repo[self.local]
203 return self._repo[self.local]
197
204
198 @util.propertycache
205 @util.propertycache
199 def other(self):
206 def other(self):
200 if self._other is None:
207 if self._other is None:
201 msg = b"other accessed but self._other isn't set"
208 msg = b"other accessed but self._other isn't set"
202 raise error.ProgrammingError(msg)
209 raise error.ProgrammingError(msg)
203 return self._other
210 return self._other
204
211
205 @util.propertycache
212 @util.propertycache
206 def otherctx(self):
213 def otherctx(self):
207 return self._repo[self.other]
214 return self._repo[self.other]
208
215
209 def active(self):
216 def active(self):
210 """Whether mergestate is active.
217 """Whether mergestate is active.
211
218
212 Returns True if there appears to be mergestate. This is a rough proxy
219 Returns True if there appears to be mergestate. This is a rough proxy
213 for "is a merge in progress."
220 for "is a merge in progress."
214 """
221 """
215 return bool(self._local) or bool(self._state)
222 return bool(self._local) or bool(self._state)
216
223
217 def commit(self):
224 def commit(self):
218 """Write current state on disk (if necessary)"""
225 """Write current state on disk (if necessary)"""
219
226
220 @staticmethod
227 @staticmethod
221 def getlocalkey(path):
228 def getlocalkey(path):
222 """hash the path of a local file context for storage in the .hg/merge
229 """hash the path of a local file context for storage in the .hg/merge
223 directory."""
230 directory."""
224
231
225 return hex(hashutil.sha1(path).digest())
232 return hex(hashutil.sha1(path).digest())
226
233
227 def _make_backup(self, fctx, localkey):
234 def _make_backup(self, fctx, localkey):
228 raise NotImplementedError()
235 raise NotImplementedError()
229
236
230 def _restore_backup(self, fctx, localkey, flags):
237 def _restore_backup(self, fctx, localkey, flags):
231 raise NotImplementedError()
238 raise NotImplementedError()
232
239
233 def add(self, fcl, fco, fca, fd):
240 def add(self, fcl, fco, fca, fd):
234 """add a new (potentially?) conflicting file to the merge state
241 """add a new (potentially?) conflicting file to the merge state
235 fcl: file context for local,
242 fcl: file context for local,
236 fco: file context for remote,
243 fco: file context for remote,
237 fca: file context for ancestors,
244 fca: file context for ancestors,
238 fd: file path of the resulting merge.
245 fd: file path of the resulting merge.
239
246
240 note: also write the local version to the `.hg/merge` directory.
247 note: also write the local version to the `.hg/merge` directory.
241 """
248 """
242 if fcl.isabsent():
249 if fcl.isabsent():
243 localkey = nullhex
250 localkey = nullhex
244 else:
251 else:
245 localkey = mergestate.getlocalkey(fcl.path())
252 localkey = mergestate.getlocalkey(fcl.path())
246 self._make_backup(fcl, localkey)
253 self._make_backup(fcl, localkey)
247 self._state[fd] = [
254 self._state[fd] = [
248 MERGE_RECORD_UNRESOLVED,
255 MERGE_RECORD_UNRESOLVED,
249 localkey,
256 localkey,
250 fcl.path(),
257 fcl.path(),
251 fca.path(),
258 fca.path(),
252 hex(fca.filenode()),
259 hex(fca.filenode()),
253 fco.path(),
260 fco.path(),
254 hex(fco.filenode()),
261 hex(fco.filenode()),
255 fcl.flags(),
262 fcl.flags(),
256 ]
263 ]
257 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
264 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
258 self._dirty = True
265 self._dirty = True
259
266
260 def addpathconflict(self, path, frename, forigin):
267 def addpathconflict(self, path, frename, forigin):
261 """add a new conflicting path to the merge state
268 """add a new conflicting path to the merge state
262 path: the path that conflicts
269 path: the path that conflicts
263 frename: the filename the conflicting file was renamed to
270 frename: the filename the conflicting file was renamed to
264 forigin: origin of the file ('l' or 'r' for local/remote)
271 forigin: origin of the file ('l' or 'r' for local/remote)
265 """
272 """
266 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
267 self._dirty = True
274 self._dirty = True
268
275
269 def addcommitinfo(self, path, data):
276 def addcommitinfo(self, path, data):
270 """ stores information which is required at commit
277 """ stores information which is required at commit
271 into _stateextras """
278 into _stateextras """
272 self._stateextras[path].update(data)
279 self._stateextras[path].update(data)
273 self._dirty = True
280 self._dirty = True
274
281
275 def __contains__(self, dfile):
282 def __contains__(self, dfile):
276 return dfile in self._state
283 return dfile in self._state
277
284
278 def __getitem__(self, dfile):
285 def __getitem__(self, dfile):
279 return self._state[dfile][0]
286 return self._state[dfile][0]
280
287
281 def __iter__(self):
288 def __iter__(self):
282 return iter(sorted(self._state))
289 return iter(sorted(self._state))
283
290
284 def files(self):
291 def files(self):
285 return self._state.keys()
292 return self._state.keys()
286
293
287 def mark(self, dfile, state):
294 def mark(self, dfile, state):
288 self._state[dfile][0] = state
295 self._state[dfile][0] = state
289 self._dirty = True
296 self._dirty = True
290
297
291 def unresolved(self):
298 def unresolved(self):
292 """Obtain the paths of unresolved files."""
299 """Obtain the paths of unresolved files."""
293
300
294 for f, entry in pycompat.iteritems(self._state):
301 for f, entry in pycompat.iteritems(self._state):
295 if entry[0] in (
302 if entry[0] in (
296 MERGE_RECORD_UNRESOLVED,
303 MERGE_RECORD_UNRESOLVED,
297 MERGE_RECORD_UNRESOLVED_PATH,
304 MERGE_RECORD_UNRESOLVED_PATH,
298 ):
305 ):
299 yield f
306 yield f
300
307
301 def extras(self, filename):
308 def extras(self, filename):
302 return self._stateextras[filename]
309 return self._stateextras[filename]
303
310
304 def _resolve(self, preresolve, dfile, wctx):
311 def _resolve(self, preresolve, dfile, wctx):
305 """rerun merge process for file path `dfile`.
312 """rerun merge process for file path `dfile`.
306 Returns whether the merge was completed and the return value of merge
313 Returns whether the merge was completed and the return value of merge
307 obtained from filemerge._filemerge().
314 obtained from filemerge._filemerge().
308 """
315 """
309 if self[dfile] in (
316 if self[dfile] in (
310 MERGE_RECORD_RESOLVED,
317 MERGE_RECORD_RESOLVED,
311 LEGACY_RECORD_DRIVER_RESOLVED,
318 LEGACY_RECORD_DRIVER_RESOLVED,
312 ):
319 ):
313 return True, 0
320 return True, 0
314 stateentry = self._state[dfile]
321 stateentry = self._state[dfile]
315 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
322 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
316 octx = self._repo[self._other]
323 octx = self._repo[self._other]
317 extras = self.extras(dfile)
324 extras = self.extras(dfile)
318 anccommitnode = extras.get(b'ancestorlinknode')
325 anccommitnode = extras.get(b'ancestorlinknode')
319 if anccommitnode:
326 if anccommitnode:
320 actx = self._repo[anccommitnode]
327 actx = self._repo[anccommitnode]
321 else:
328 else:
322 actx = None
329 actx = None
323 fcd = _filectxorabsent(localkey, wctx, dfile)
330 fcd = _filectxorabsent(localkey, wctx, dfile)
324 fco = _filectxorabsent(onode, octx, ofile)
331 fco = _filectxorabsent(onode, octx, ofile)
325 # TODO: move this to filectxorabsent
332 # TODO: move this to filectxorabsent
326 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
333 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
327 # "premerge" x flags
334 # "premerge" x flags
328 flo = fco.flags()
335 flo = fco.flags()
329 fla = fca.flags()
336 fla = fca.flags()
330 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
337 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
331 if fca.node() == nullid and flags != flo:
338 if fca.node() == nullid and flags != flo:
332 if preresolve:
339 if preresolve:
333 self._repo.ui.warn(
340 self._repo.ui.warn(
334 _(
341 _(
335 b'warning: cannot merge flags for %s '
342 b'warning: cannot merge flags for %s '
336 b'without common ancestor - keeping local flags\n'
343 b'without common ancestor - keeping local flags\n'
337 )
344 )
338 % afile
345 % afile
339 )
346 )
340 elif flags == fla:
347 elif flags == fla:
341 flags = flo
348 flags = flo
342 if preresolve:
349 if preresolve:
343 # restore local
350 # restore local
344 if localkey != nullhex:
351 if localkey != nullhex:
345 self._restore_backup(wctx[dfile], localkey, flags)
352 self._restore_backup(wctx[dfile], localkey, flags)
346 else:
353 else:
347 wctx[dfile].remove(ignoremissing=True)
354 wctx[dfile].remove(ignoremissing=True)
348 complete, merge_ret, deleted = filemerge.premerge(
355 complete, merge_ret, deleted = filemerge.premerge(
349 self._repo,
356 self._repo,
350 wctx,
357 wctx,
351 self._local,
358 self._local,
352 lfile,
359 lfile,
353 fcd,
360 fcd,
354 fco,
361 fco,
355 fca,
362 fca,
356 labels=self._labels,
363 labels=self._labels,
357 )
364 )
358 else:
365 else:
359 complete, merge_ret, deleted = filemerge.filemerge(
366 complete, merge_ret, deleted = filemerge.filemerge(
360 self._repo,
367 self._repo,
361 wctx,
368 wctx,
362 self._local,
369 self._local,
363 lfile,
370 lfile,
364 fcd,
371 fcd,
365 fco,
372 fco,
366 fca,
373 fca,
367 labels=self._labels,
374 labels=self._labels,
368 )
375 )
369 if merge_ret is None:
376 if merge_ret is None:
370 # If return value of merge is None, then there are no real conflict
377 # If return value of merge is None, then there are no real conflict
371 del self._state[dfile]
378 del self._state[dfile]
372 self._stateextras.pop(dfile, None)
379 self._stateextras.pop(dfile, None)
373 self._dirty = True
380 self._dirty = True
374 elif not merge_ret:
381 elif not merge_ret:
375 self.mark(dfile, MERGE_RECORD_RESOLVED)
382 self.mark(dfile, MERGE_RECORD_RESOLVED)
376
383
377 if complete:
384 if complete:
378 action = None
385 action = None
379 if deleted:
386 if deleted:
380 if fcd.isabsent():
387 if fcd.isabsent():
381 # dc: local picked. Need to drop if present, which may
388 # dc: local picked. Need to drop if present, which may
382 # happen on re-resolves.
389 # happen on re-resolves.
383 action = ACTION_FORGET
390 action = ACTION_FORGET
384 else:
391 else:
385 # cd: remote picked (or otherwise deleted)
392 # cd: remote picked (or otherwise deleted)
386 action = ACTION_REMOVE
393 action = ACTION_REMOVE
387 else:
394 else:
388 if fcd.isabsent(): # dc: remote picked
395 if fcd.isabsent(): # dc: remote picked
389 action = ACTION_GET
396 action = ACTION_GET
390 elif fco.isabsent(): # cd: local picked
397 elif fco.isabsent(): # cd: local picked
391 if dfile in self.localctx:
398 if dfile in self.localctx:
392 action = ACTION_ADD_MODIFIED
399 action = ACTION_ADD_MODIFIED
393 else:
400 else:
394 action = ACTION_ADD
401 action = ACTION_ADD
395 # else: regular merges (no action necessary)
402 # else: regular merges (no action necessary)
396 self._results[dfile] = merge_ret, action
403 self._results[dfile] = merge_ret, action
397
404
398 return complete, merge_ret
405 return complete, merge_ret
399
406
400 def preresolve(self, dfile, wctx):
407 def preresolve(self, dfile, wctx):
401 """run premerge process for dfile
408 """run premerge process for dfile
402
409
403 Returns whether the merge is complete, and the exit code."""
410 Returns whether the merge is complete, and the exit code."""
404 return self._resolve(True, dfile, wctx)
411 return self._resolve(True, dfile, wctx)
405
412
406 def resolve(self, dfile, wctx):
413 def resolve(self, dfile, wctx):
407 """run merge process (assuming premerge was run) for dfile
414 """run merge process (assuming premerge was run) for dfile
408
415
409 Returns the exit code of the merge."""
416 Returns the exit code of the merge."""
410 return self._resolve(False, dfile, wctx)[1]
417 return self._resolve(False, dfile, wctx)[1]
411
418
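# A hedged sketch, not part of the original file, of how a caller might drive
# the two steps above for a single conflicted path `dfile` (similar in spirit
# to what the resolve command does):
#
#     complete, r = ms.preresolve(dfile, wctx)
#     if not complete:
#         r = ms.resolve(dfile, wctx)
#     ms.commit()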
412 def counts(self):
419 def counts(self):
413 """return counts for updated, merged and removed files in this
420 """return counts for updated, merged and removed files in this
414 session"""
421 session"""
415 updated, merged, removed = 0, 0, 0
422 updated, merged, removed = 0, 0, 0
416 for r, action in pycompat.itervalues(self._results):
423 for r, action in pycompat.itervalues(self._results):
417 if r is None:
424 if r is None:
418 updated += 1
425 updated += 1
419 elif r == 0:
426 elif r == 0:
420 if action == ACTION_REMOVE:
427 if action == ACTION_REMOVE:
421 removed += 1
428 removed += 1
422 else:
429 else:
423 merged += 1
430 merged += 1
424 return updated, merged, removed
431 return updated, merged, removed
425
432
426 def unresolvedcount(self):
433 def unresolvedcount(self):
427 """get unresolved count for this merge (persistent)"""
434 """get unresolved count for this merge (persistent)"""
428 return len(list(self.unresolved()))
435 return len(list(self.unresolved()))
429
436
430 def actions(self):
437 def actions(self):
431 """return lists of actions to perform on the dirstate"""
438 """return lists of actions to perform on the dirstate"""
432 actions = {
439 actions = {
433 ACTION_REMOVE: [],
440 ACTION_REMOVE: [],
434 ACTION_FORGET: [],
441 ACTION_FORGET: [],
435 ACTION_ADD: [],
442 ACTION_ADD: [],
436 ACTION_ADD_MODIFIED: [],
443 ACTION_ADD_MODIFIED: [],
437 ACTION_GET: [],
444 ACTION_GET: [],
438 }
445 }
439 for f, (r, action) in pycompat.iteritems(self._results):
446 for f, (r, action) in pycompat.iteritems(self._results):
440 if action is not None:
447 if action is not None:
441 actions[action].append((f, None, b"merge result"))
448 actions[action].append((f, None, b"merge result"))
442 return actions
449 return actions
443
450
444
451
445 class mergestate(_mergestate_base):
452 class mergestate(_mergestate_base):
446
453
447 statepathv1 = b'merge/state'
454 statepathv1 = b'merge/state'
448 statepathv2 = b'merge/state2'
455 statepathv2 = b'merge/state2'
449
456
450 @staticmethod
457 @staticmethod
451 def clean(repo):
458 def clean(repo):
452 """Initialize a brand new merge state, removing any existing state on
459 """Initialize a brand new merge state, removing any existing state on
453 disk."""
460 disk."""
454 ms = mergestate(repo)
461 ms = mergestate(repo)
455 ms.reset()
462 ms.reset()
456 return ms
463 return ms
457
464
458 @staticmethod
465 @staticmethod
459 def read(repo):
466 def read(repo):
460 """Initialize the merge state, reading it from disk."""
467 """Initialize the merge state, reading it from disk."""
461 ms = mergestate(repo)
468 ms = mergestate(repo)
462 ms._read()
469 ms._read()
463 return ms
470 return ms
464
471
465 def _read(self):
472 def _read(self):
466 """Analyse each record content to restore a serialized state from disk
473 """Analyse each record content to restore a serialized state from disk
467
474
468 This function processes "record" entries produced by the de-serialization
475 This function processes "record" entries produced by the de-serialization
469 of the on-disk file.
476 of the on-disk file.
470 """
477 """
471 unsupported = set()
478 unsupported = set()
472 records = self._readrecords()
479 records = self._readrecords()
473 for rtype, record in records:
480 for rtype, record in records:
474 if rtype == RECORD_LOCAL:
481 if rtype == RECORD_LOCAL:
475 self._local = bin(record)
482 self._local = bin(record)
476 elif rtype == RECORD_OTHER:
483 elif rtype == RECORD_OTHER:
477 self._other = bin(record)
484 self._other = bin(record)
478 elif rtype == LEGACY_MERGE_DRIVER_STATE:
485 elif rtype == LEGACY_MERGE_DRIVER_STATE:
479 pass
486 pass
480 elif rtype in (
487 elif rtype in (
481 RECORD_MERGED,
488 RECORD_MERGED,
482 RECORD_CHANGEDELETE_CONFLICT,
489 RECORD_CHANGEDELETE_CONFLICT,
483 RECORD_PATH_CONFLICT,
490 RECORD_PATH_CONFLICT,
484 LEGACY_MERGE_DRIVER_MERGE,
491 LEGACY_MERGE_DRIVER_MERGE,
485 LEGACY_RECORD_RESOLVED_OTHER,
492 LEGACY_RECORD_RESOLVED_OTHER,
486 ):
493 ):
487 bits = record.split(b'\0')
494 bits = record.split(b'\0')
488 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
495 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
489 # and we now store related information in _stateextras, so
496 # and we now store related information in _stateextras, so
490 # let's write to _stateextras directly
497 # let's write to _stateextras directly
491 if bits[1] == MERGE_RECORD_MERGED_OTHER:
498 if bits[1] == MERGE_RECORD_MERGED_OTHER:
492 self._stateextras[bits[0]][b'filenode-source'] = b'other'
499 self._stateextras[bits[0]][b'filenode-source'] = b'other'
493 else:
500 else:
494 self._state[bits[0]] = bits[1:]
501 self._state[bits[0]] = bits[1:]
495 elif rtype == RECORD_FILE_VALUES:
502 elif rtype == RECORD_FILE_VALUES:
496 filename, rawextras = record.split(b'\0', 1)
503 filename, rawextras = record.split(b'\0', 1)
497 extraparts = rawextras.split(b'\0')
504 extraparts = rawextras.split(b'\0')
498 extras = {}
505 extras = {}
499 i = 0
506 i = 0
500 while i < len(extraparts):
507 while i < len(extraparts):
501 extras[extraparts[i]] = extraparts[i + 1]
508 extras[extraparts[i]] = extraparts[i + 1]
502 i += 2
509 i += 2
503
510
504 self._stateextras[filename] = extras
511 self._stateextras[filename] = extras
505 elif rtype == RECORD_LABELS:
512 elif rtype == RECORD_LABELS:
506 labels = record.split(b'\0', 2)
513 labels = record.split(b'\0', 2)
507 self._labels = [l for l in labels if len(l) > 0]
514 self._labels = [l for l in labels if len(l) > 0]
508 elif not rtype.islower():
515 elif not rtype.islower():
509 unsupported.add(rtype)
516 unsupported.add(rtype)
510
517
511 if unsupported:
518 if unsupported:
512 raise error.UnsupportedMergeRecords(unsupported)
519 raise error.UnsupportedMergeRecords(unsupported)
513
520
514 def _readrecords(self):
521 def _readrecords(self):
515 """Read merge state from disk and return a list of record (TYPE, data)
522 """Read merge state from disk and return a list of record (TYPE, data)
516
523
517 We read data from both v1 and v2 files and decide which one to use.
524 We read data from both v1 and v2 files and decide which one to use.
518
525
519 V1 has been used by versions prior to 2.9.1 and contains less data than
526 V1 has been used by versions prior to 2.9.1 and contains less data than
520 v2. We read both versions and check if no data in v2 contradicts
527 v2. We read both versions and check if no data in v2 contradicts
521 v1. If there is no contradiction we can safely assume that both v1
528 v1. If there is no contradiction we can safely assume that both v1
522 and v2 were written at the same time and use the extra data in v2. If
529 and v2 were written at the same time and use the extra data in v2. If
523 there is a contradiction we ignore v2 content as we assume an old version
530 there is a contradiction we ignore v2 content as we assume an old version
524 of Mercurial has overwritten the mergestate file and left an old v2
531 of Mercurial has overwritten the mergestate file and left an old v2
525 file around.
532 file around.
526
533
527 returns list of record [(TYPE, data), ...]"""
534 returns list of record [(TYPE, data), ...]"""
528 v1records = self._readrecordsv1()
535 v1records = self._readrecordsv1()
529 v2records = self._readrecordsv2()
536 v2records = self._readrecordsv2()
530 if self._v1v2match(v1records, v2records):
537 if self._v1v2match(v1records, v2records):
531 return v2records
538 return v2records
532 else:
539 else:
533 # v1 file is newer than v2 file, use it
540 # v1 file is newer than v2 file, use it
534 # we have to infer the "other" changeset of the merge
541 # we have to infer the "other" changeset of the merge
535 # we cannot do better than that with v1 of the format
542 # we cannot do better than that with v1 of the format
536 mctx = self._repo[None].parents()[-1]
543 mctx = self._repo[None].parents()[-1]
537 v1records.append((RECORD_OTHER, mctx.hex()))
544 v1records.append((RECORD_OTHER, mctx.hex()))
538 # add placeholder "other" file node information
545 # add placeholder "other" file node information
539 # nobody is using it yet so we do not need to fetch the data
546 # nobody is using it yet so we do not need to fetch the data
540 # if mctx was wrong, `mctx[bits[-2]]` may fail.
547 # if mctx was wrong, `mctx[bits[-2]]` may fail.
541 for idx, r in enumerate(v1records):
548 for idx, r in enumerate(v1records):
542 if r[0] == RECORD_MERGED:
549 if r[0] == RECORD_MERGED:
543 bits = r[1].split(b'\0')
550 bits = r[1].split(b'\0')
544 bits.insert(-2, b'')
551 bits.insert(-2, b'')
545 v1records[idx] = (r[0], b'\0'.join(bits))
552 v1records[idx] = (r[0], b'\0'.join(bits))
546 return v1records
553 return v1records
547
554
548 def _v1v2match(self, v1records, v2records):
555 def _v1v2match(self, v1records, v2records):
549 oldv2 = set() # old format version of v2 record
556 oldv2 = set() # old format version of v2 record
550 for rec in v2records:
557 for rec in v2records:
551 if rec[0] == RECORD_LOCAL:
558 if rec[0] == RECORD_LOCAL:
552 oldv2.add(rec)
559 oldv2.add(rec)
553 elif rec[0] == RECORD_MERGED:
560 elif rec[0] == RECORD_MERGED:
554 # drop the onode data (not contained in v1)
561 # drop the onode data (not contained in v1)
555 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
562 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
556 for rec in v1records:
563 for rec in v1records:
557 if rec not in oldv2:
564 if rec not in oldv2:
558 return False
565 return False
559 else:
566 else:
560 return True
567 return True
561
568
562 def _readrecordsv1(self):
569 def _readrecordsv1(self):
563 """read on disk merge state for version 1 file
570 """read on disk merge state for version 1 file
564
571
565 returns a list of records [(TYPE, data), ...]
572 returns a list of records [(TYPE, data), ...]
566
573
567 Note: the "F" data from this file are one entry short
574 Note: the "F" data from this file are one entry short
568 (no "other file node" entry)
575 (no "other file node" entry)
569 """
576 """
570 records = []
577 records = []
571 try:
578 try:
572 f = self._repo.vfs(self.statepathv1)
579 f = self._repo.vfs(self.statepathv1)
573 for i, l in enumerate(f):
580 for i, l in enumerate(f):
574 if i == 0:
581 if i == 0:
575 records.append((RECORD_LOCAL, l[:-1]))
582 records.append((RECORD_LOCAL, l[:-1]))
576 else:
583 else:
577 records.append((RECORD_MERGED, l[:-1]))
584 records.append((RECORD_MERGED, l[:-1]))
578 f.close()
585 f.close()
579 except IOError as err:
586 except IOError as err:
580 if err.errno != errno.ENOENT:
587 if err.errno != errno.ENOENT:
581 raise
588 raise
582 return records
589 return records
583
590
584 def _readrecordsv2(self):
591 def _readrecordsv2(self):
585 """read on disk merge state for version 2 file
592 """read on disk merge state for version 2 file
586
593
587 This format is a list of arbitrary records of the form:
594 This format is a list of arbitrary records of the form:
588
595
589 [type][length][content]
596 [type][length][content]
590
597
591 `type` is a single character, `length` is a 4 byte integer, and
598 `type` is a single character, `length` is a 4 byte integer, and
592 `content` is an arbitrary byte sequence of length `length`.
599 `content` is an arbitrary byte sequence of length `length`.
593
600
594 Mercurial versions prior to 3.7 have a bug where if there are
601 Mercurial versions prior to 3.7 have a bug where if there are
595 unsupported mandatory merge records, attempting to clear out the merge
602 unsupported mandatory merge records, attempting to clear out the merge
596 state with hg update --clean or similar aborts. The 't' record type
603 state with hg update --clean or similar aborts. The 't' record type
597 works around that by writing out what those versions treat as an
604 works around that by writing out what those versions treat as an
598 advisory record, but which later versions interpret as special: the first
605 advisory record, but which later versions interpret as special: the first
599 character is the 'real' record type and everything onwards is the data.
606 character is the 'real' record type and everything onwards is the data.
600
607
601 Returns list of records [(TYPE, data), ...]."""
608 Returns list of records [(TYPE, data), ...]."""
602 records = []
609 records = []
603 try:
610 try:
604 f = self._repo.vfs(self.statepathv2)
611 f = self._repo.vfs(self.statepathv2)
605 data = f.read()
612 data = f.read()
606 off = 0
613 off = 0
607 end = len(data)
614 end = len(data)
608 while off < end:
615 while off < end:
609 rtype = data[off : off + 1]
616 rtype = data[off : off + 1]
610 off += 1
617 off += 1
611 length = _unpack(b'>I', data[off : (off + 4)])[0]
618 length = _unpack(b'>I', data[off : (off + 4)])[0]
612 off += 4
619 off += 4
613 record = data[off : (off + length)]
620 record = data[off : (off + length)]
614 off += length
621 off += length
615 if rtype == RECORD_OVERRIDE:
622 if rtype == RECORD_OVERRIDE:
616 rtype, record = record[0:1], record[1:]
623 rtype, record = record[0:1], record[1:]
617 records.append((rtype, record))
624 records.append((rtype, record))
618 f.close()
625 f.close()
619 except IOError as err:
626 except IOError as err:
620 if err.errno != errno.ENOENT:
627 if err.errno != errno.ENOENT:
621 raise
628 raise
622 return records
629 return records
623
630
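To make the framing concrete, here is a small standalone sketch (plain Python, outside the mergestate API) of writing and re-reading a couple of records, including unwrapping a 't' override record; the b'L' and b'X' type bytes and the payloads are illustrative only:

    import struct

    def _frame(rtype, payload):
        # [type][4-byte big-endian length][content], as described above
        return rtype + struct.pack(b'>I', len(payload)) + payload

    data = _frame(b'L', b'0' * 40) + _frame(b't', b'X' + b'extension payload')
    records, off = [], 0
    while off < len(data):
        rtype = data[off:off + 1]
        length = struct.unpack(b'>I', data[off + 1:off + 5])[0]
        payload, off = data[off + 5:off + 5 + length], off + 5 + length
        if rtype == b't':                      # advisory wrapper: real type is the first byte
            rtype, payload = payload[0:1], payload[1:]
        records.append((rtype, payload))
    assert records == [(b'L', b'0' * 40), (b'X', b'extension payload')]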
624 def commit(self):
631 def commit(self):
625 if self._dirty:
632 if self._dirty:
626 records = self._makerecords()
633 records = self._makerecords()
627 self._writerecords(records)
634 self._writerecords(records)
628 self._dirty = False
635 self._dirty = False
629
636
630 def _makerecords(self):
637 def _makerecords(self):
631 records = []
638 records = []
632 records.append((RECORD_LOCAL, hex(self._local)))
639 records.append((RECORD_LOCAL, hex(self._local)))
633 records.append((RECORD_OTHER, hex(self._other)))
640 records.append((RECORD_OTHER, hex(self._other)))
634 # Write out state items. In all cases, the value of the state map entry
641 # Write out state items. In all cases, the value of the state map entry
635 # is written as the contents of the record. The record type depends on
642 # is written as the contents of the record. The record type depends on
636 # the type of state that is stored, and capital-letter records are used
643 # the type of state that is stored, and capital-letter records are used
637 # to prevent older versions of Mercurial that do not support the feature
644 # to prevent older versions of Mercurial that do not support the feature
638 # from loading them.
645 # from loading them.
639 for filename, v in pycompat.iteritems(self._state):
646 for filename, v in pycompat.iteritems(self._state):
640 if v[0] in (
647 if v[0] in (
641 MERGE_RECORD_UNRESOLVED_PATH,
648 MERGE_RECORD_UNRESOLVED_PATH,
642 MERGE_RECORD_RESOLVED_PATH,
649 MERGE_RECORD_RESOLVED_PATH,
643 ):
650 ):
644 # Path conflicts. These are stored in 'P' records. The current
651 # Path conflicts. These are stored in 'P' records. The current
645 # resolution state ('pu' or 'pr') is stored within the record.
652 # resolution state ('pu' or 'pr') is stored within the record.
646 records.append(
653 records.append(
647 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
654 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
648 )
655 )
649 elif v[1] == nullhex or v[6] == nullhex:
656 elif v[1] == nullhex or v[6] == nullhex:
650 # Change/Delete or Delete/Change conflicts. These are stored in
657 # Change/Delete or Delete/Change conflicts. These are stored in
651 # 'C' records. v[1] is the local file, and is nullhex when the
658 # 'C' records. v[1] is the local file, and is nullhex when the
652 # file is deleted locally ('dc'). v[6] is the remote file, and
659 # file is deleted locally ('dc'). v[6] is the remote file, and
653 # is nullhex when the file is deleted remotely ('cd').
660 # is nullhex when the file is deleted remotely ('cd').
654 records.append(
661 records.append(
655 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
662 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
656 )
663 )
657 else:
664 else:
658 # Normal files. These are stored in 'F' records.
665 # Normal files. These are stored in 'F' records.
659 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
666 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
660 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
667 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
661 rawextras = b'\0'.join(
668 rawextras = b'\0'.join(
662 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
669 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
663 )
670 )
664 records.append(
671 records.append(
665 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
672 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
666 )
673 )
667 if self._labels is not None:
674 if self._labels is not None:
668 labels = b'\0'.join(self._labels)
675 labels = b'\0'.join(self._labels)
669 records.append((RECORD_LABELS, labels))
676 records.append((RECORD_LABELS, labels))
670 return records
677 return records
671
678
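Restated as a standalone sketch, the dispatch above reduces to a three-way decision on the state tuple. This is only a restatement for readability; the names are the module-level constants referenced in the code, and the single-letter types in the comments come from the comments above:

    def _record_type_for(v):
        # v is a mergestate state-map value, as iterated above.
        if v[0] in (MERGE_RECORD_UNRESOLVED_PATH, MERGE_RECORD_RESOLVED_PATH):
            return RECORD_PATH_CONFLICT          # 'P': path conflict ('pu'/'pr' kept in record)
        elif v[1] == nullhex or v[6] == nullhex:
            return RECORD_CHANGEDELETE_CONFLICT  # 'C': change/delete or delete/change
        else:
            return RECORD_MERGED                 # 'F': normal merged file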
672 def _writerecords(self, records):
679 def _writerecords(self, records):
673 """Write current state on disk (both v1 and v2)"""
680 """Write current state on disk (both v1 and v2)"""
674 self._writerecordsv1(records)
681 self._writerecordsv1(records)
675 self._writerecordsv2(records)
682 self._writerecordsv2(records)
676
683
677 def _writerecordsv1(self, records):
684 def _writerecordsv1(self, records):
678 """Write current state on disk in a version 1 file"""
685 """Write current state on disk in a version 1 file"""
679 f = self._repo.vfs(self.statepathv1, b'wb')
686 f = self._repo.vfs(self.statepathv1, b'wb')
680 irecords = iter(records)
687 irecords = iter(records)
681 lrecords = next(irecords)
688 lrecords = next(irecords)
682 assert lrecords[0] == RECORD_LOCAL
689 assert lrecords[0] == RECORD_LOCAL
683 f.write(hex(self._local) + b'\n')
690 f.write(hex(self._local) + b'\n')
684 for rtype, data in irecords:
691 for rtype, data in irecords:
685 if rtype == RECORD_MERGED:
692 if rtype == RECORD_MERGED:
686 f.write(b'%s\n' % _droponode(data))
693 f.write(b'%s\n' % _droponode(data))
687 f.close()
694 f.close()
688
695
689 def _writerecordsv2(self, records):
696 def _writerecordsv2(self, records):
690 """Write current state on disk in a version 2 file
697 """Write current state on disk in a version 2 file
691
698
692 See the docstring for _readrecordsv2 for why we use 't'."""
699 See the docstring for _readrecordsv2 for why we use 't'."""
693 # these are the records that all version 2 clients can read
700 # these are the records that all version 2 clients can read
694 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
701 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
695 f = self._repo.vfs(self.statepathv2, b'wb')
702 f = self._repo.vfs(self.statepathv2, b'wb')
696 for key, data in records:
703 for key, data in records:
697 assert len(key) == 1
704 assert len(key) == 1
698 if key not in allowlist:
705 if key not in allowlist:
699 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
706 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
700 format = b'>sI%is' % len(data)
707 format = b'>sI%is' % len(data)
701 f.write(_pack(format, key, len(data), data))
708 f.write(_pack(format, key, len(data), data))
702 f.close()
709 f.close()
703
710
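And the write side, sketched standalone: any record whose key is not in the allowlist is wrapped in a 't' record so that the older versions described in _readrecordsv2 treat it as advisory. The b'L'/b'O'/b'F' stand-ins and the b'X' record are illustrative, not the module's actual constants:

    import struct

    allowlist = (b'L', b'O', b'F')    # illustrative stand-ins for RECORD_LOCAL/OTHER/MERGED
    def _serialize(key, data):
        if key not in allowlist:
            key, data = b't', b'%s%s' % (key, data)   # real type becomes the first payload byte
        return struct.pack(b'>sI%ds' % len(data), key, len(data), data)

    blob = _serialize(b'X', b'extension payload')
    assert blob[0:1] == b't' and blob[5:6] == b'X'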
704 def _make_backup(self, fctx, localkey):
711 def _make_backup(self, fctx, localkey):
705 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
712 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
706
713
707 def _restore_backup(self, fctx, localkey, flags):
714 def _restore_backup(self, fctx, localkey, flags):
708 with self._repo.vfs(b'merge/' + localkey) as f:
715 with self._repo.vfs(b'merge/' + localkey) as f:
709 fctx.write(f.read(), flags)
716 fctx.write(f.read(), flags)
710
717
711 def reset(self):
718 def reset(self):
712 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
719 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
713
720
714
721
715 class memmergestate(_mergestate_base):
722 class memmergestate(_mergestate_base):
716 def __init__(self, repo):
723 def __init__(self, repo):
717 super(memmergestate, self).__init__(repo)
724 super(memmergestate, self).__init__(repo)
718 self._backups = {}
725 self._backups = {}
719
726
720 def _make_backup(self, fctx, localkey):
727 def _make_backup(self, fctx, localkey):
721 self._backups[localkey] = fctx.data()
728 self._backups[localkey] = fctx.data()
722
729
723 def _restore_backup(self, fctx, localkey, flags):
730 def _restore_backup(self, fctx, localkey, flags):
724 fctx.write(self._backups[localkey], flags)
731 fctx.write(self._backups[localkey], flags)
725
732
726
733
727 def recordupdates(repo, actions, branchmerge, getfiledata):
734 def recordupdates(repo, actions, branchmerge, getfiledata):
728 """record merge actions to the dirstate"""
735 """record merge actions to the dirstate"""
729 # remove (must come first)
736 # remove (must come first)
730 for f, args, msg in actions.get(ACTION_REMOVE, []):
737 for f, args, msg in actions.get(ACTION_REMOVE, []):
731 if branchmerge:
738 if branchmerge:
732 repo.dirstate.remove(f)
739 repo.dirstate.remove(f)
733 else:
740 else:
734 repo.dirstate.drop(f)
741 repo.dirstate.drop(f)
735
742
736 # forget (must come first)
743 # forget (must come first)
737 for f, args, msg in actions.get(ACTION_FORGET, []):
744 for f, args, msg in actions.get(ACTION_FORGET, []):
738 repo.dirstate.drop(f)
745 repo.dirstate.drop(f)
739
746
740 # resolve path conflicts
747 # resolve path conflicts
741 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
748 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
742 (f0, origf0) = args
749 (f0, origf0) = args
743 repo.dirstate.add(f)
750 repo.dirstate.add(f)
744 repo.dirstate.copy(origf0, f)
751 repo.dirstate.copy(origf0, f)
745 if f0 == origf0:
752 if f0 == origf0:
746 repo.dirstate.remove(f0)
753 repo.dirstate.remove(f0)
747 else:
754 else:
748 repo.dirstate.drop(f0)
755 repo.dirstate.drop(f0)
749
756
750 # re-add
757 # re-add
751 for f, args, msg in actions.get(ACTION_ADD, []):
758 for f, args, msg in actions.get(ACTION_ADD, []):
752 repo.dirstate.add(f)
759 repo.dirstate.add(f)
753
760
754 # re-add/mark as modified
761 # re-add/mark as modified
755 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
762 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
756 if branchmerge:
763 if branchmerge:
757 repo.dirstate.normallookup(f)
764 repo.dirstate.normallookup(f)
758 else:
765 else:
759 repo.dirstate.add(f)
766 repo.dirstate.add(f)
760
767
761 # exec change
768 # exec change
762 for f, args, msg in actions.get(ACTION_EXEC, []):
769 for f, args, msg in actions.get(ACTION_EXEC, []):
763 repo.dirstate.normallookup(f)
770 repo.dirstate.normallookup(f)
764
771
765 # keep
772 # keep
766 for f, args, msg in actions.get(ACTION_KEEP, []):
773 for f, args, msg in actions.get(ACTION_KEEP, []):
767 pass
774 pass
768
775
769 # keep deleted
776 # keep deleted
770 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
777 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
771 pass
778 pass
772
779
773 # keep new
780 # keep new
774 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
781 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
775 pass
782 pass
776
783
777 # get
784 # get
778 for f, args, msg in actions.get(ACTION_GET, []):
785 for f, args, msg in actions.get(ACTION_GET, []):
779 if branchmerge:
786 if branchmerge:
780 repo.dirstate.otherparent(f)
787 repo.dirstate.otherparent(f)
781 else:
788 else:
782 parentfiledata = getfiledata[f] if getfiledata else None
789 parentfiledata = getfiledata[f] if getfiledata else None
783 repo.dirstate.normal(f, parentfiledata=parentfiledata)
790 repo.dirstate.normal(f, parentfiledata=parentfiledata)
784
791
785 # merge
792 # merge
786 for f, args, msg in actions.get(ACTION_MERGE, []):
793 for f, args, msg in actions.get(ACTION_MERGE, []):
787 f1, f2, fa, move, anc = args
794 f1, f2, fa, move, anc = args
788 if branchmerge:
795 if branchmerge:
789 # We've done a branch merge, mark this file as merged
796 # We've done a branch merge, mark this file as merged
790 # so that we properly record the merger later
797 # so that we properly record the merger later
791 repo.dirstate.merge(f)
798 repo.dirstate.merge(f)
792 if f1 != f2: # copy/rename
799 if f1 != f2: # copy/rename
793 if move:
800 if move:
794 repo.dirstate.remove(f1)
801 repo.dirstate.remove(f1)
795 if f1 != f:
802 if f1 != f:
796 repo.dirstate.copy(f1, f)
803 repo.dirstate.copy(f1, f)
797 else:
804 else:
798 repo.dirstate.copy(f2, f)
805 repo.dirstate.copy(f2, f)
799 else:
806 else:
800 # We've update-merged a locally modified file, so
807 # We've update-merged a locally modified file, so
801 # we set the dirstate to emulate a normal checkout
808 # we set the dirstate to emulate a normal checkout
802 # of that file some time in the past. Thus our
809 # of that file some time in the past. Thus our
803 # merge will appear as a normal local file
810 # merge will appear as a normal local file
804 # modification.
811 # modification.
805 if f2 == f: # file not locally copied/moved
812 if f2 == f: # file not locally copied/moved
806 repo.dirstate.normallookup(f)
813 repo.dirstate.normallookup(f)
807 if move:
814 if move:
808 repo.dirstate.drop(f1)
815 repo.dirstate.drop(f1)
809
816
810 # directory rename, move local
817 # directory rename, move local
811 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
818 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
812 f0, flag = args
819 f0, flag = args
813 if branchmerge:
820 if branchmerge:
814 repo.dirstate.add(f)
821 repo.dirstate.add(f)
815 repo.dirstate.remove(f0)
822 repo.dirstate.remove(f0)
816 repo.dirstate.copy(f0, f)
823 repo.dirstate.copy(f0, f)
817 else:
824 else:
818 repo.dirstate.normal(f)
825 repo.dirstate.normal(f)
819 repo.dirstate.drop(f0)
826 repo.dirstate.drop(f0)
820
827
821 # directory rename, get
828 # directory rename, get
822 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
829 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
823 f0, flag = args
830 f0, flag = args
824 if branchmerge:
831 if branchmerge:
825 repo.dirstate.add(f)
832 repo.dirstate.add(f)
826 repo.dirstate.copy(f0, f)
833 repo.dirstate.copy(f0, f)
827 else:
834 else:
828 repo.dirstate.normal(f)
835 repo.dirstate.normal(f)
@@ -1,833 +1,833 b''
1 # sparse.py - functionality for sparse checkouts
1 # sparse.py - functionality for sparse checkouts
2 #
2 #
3 # Copyright 2014 Facebook, Inc.
3 # Copyright 2014 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 )
16 )
17 from . import (
17 from . import (
18 error,
18 error,
19 match as matchmod,
19 match as matchmod,
20 merge as mergemod,
20 merge as mergemod,
21 mergestate as mergestatemod,
21 mergestate as mergestatemod,
22 pathutil,
22 pathutil,
23 pycompat,
23 pycompat,
24 requirements,
24 requirements,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 )
27 )
28 from .utils import hashutil
28 from .utils import hashutil
29
29
30
30
31 # Whether sparse features are enabled. This variable is intended to be
31 # Whether sparse features are enabled. This variable is intended to be
32 # temporary to facilitate porting sparse to core. It should eventually be
32 # temporary to facilitate porting sparse to core. It should eventually be
33 # a per-repo option, possibly a repo requirement.
33 # a per-repo option, possibly a repo requirement.
34 enabled = False
34 enabled = False
35
35
36
36
37 def parseconfig(ui, raw, action):
37 def parseconfig(ui, raw, action):
38 """Parse sparse config file content.
38 """Parse sparse config file content.
39
39
40 action is the command which is triggering this read; it can be narrow or sparse
40 action is the command which is triggering this read; it can be narrow or sparse
41
41
42 Returns a tuple of includes, excludes, and profiles.
42 Returns a tuple of includes, excludes, and profiles.
43 """
43 """
44 includes = set()
44 includes = set()
45 excludes = set()
45 excludes = set()
46 profiles = set()
46 profiles = set()
47 current = None
47 current = None
48 havesection = False
48 havesection = False
49
49
50 for line in raw.split(b'\n'):
50 for line in raw.split(b'\n'):
51 line = line.strip()
51 line = line.strip()
52 if not line or line.startswith(b'#'):
52 if not line or line.startswith(b'#'):
53 # empty or comment line, skip
53 # empty or comment line, skip
54 continue
54 continue
55 elif line.startswith(b'%include '):
55 elif line.startswith(b'%include '):
56 line = line[9:].strip()
56 line = line[9:].strip()
57 if line:
57 if line:
58 profiles.add(line)
58 profiles.add(line)
59 elif line == b'[include]':
59 elif line == b'[include]':
60 if havesection and current != includes:
60 if havesection and current != includes:
61 # TODO pass filename into this API so we can report it.
61 # TODO pass filename into this API so we can report it.
62 raise error.Abort(
62 raise error.Abort(
63 _(
63 _(
64 b'%(action)s config cannot have includes '
64 b'%(action)s config cannot have includes '
65 b'after excludes'
65 b'after excludes'
66 )
66 )
67 % {b'action': action}
67 % {b'action': action}
68 )
68 )
69 havesection = True
69 havesection = True
70 current = includes
70 current = includes
71 continue
71 continue
72 elif line == b'[exclude]':
72 elif line == b'[exclude]':
73 havesection = True
73 havesection = True
74 current = excludes
74 current = excludes
75 elif line:
75 elif line:
76 if current is None:
76 if current is None:
77 raise error.Abort(
77 raise error.Abort(
78 _(
78 _(
79 b'%(action)s config entry outside of '
79 b'%(action)s config entry outside of '
80 b'section: %(line)s'
80 b'section: %(line)s'
81 )
81 )
82 % {b'action': action, b'line': line},
82 % {b'action': action, b'line': line},
83 hint=_(
83 hint=_(
84 b'add an [include] or [exclude] line '
84 b'add an [include] or [exclude] line '
85 b'to declare the entry type'
85 b'to declare the entry type'
86 ),
86 ),
87 )
87 )
88
88
89 if line.strip().startswith(b'/'):
89 if line.strip().startswith(b'/'):
90 ui.warn(
90 ui.warn(
91 _(
91 _(
92 b'warning: %(action)s profile cannot use'
92 b'warning: %(action)s profile cannot use'
93 b' paths starting with /, ignoring %(line)s\n'
93 b' paths starting with /, ignoring %(line)s\n'
94 )
94 )
95 % {b'action': action, b'line': line}
95 % {b'action': action, b'line': line}
96 )
96 )
97 continue
97 continue
98 current.add(line)
98 current.add(line)
99
99
100 return includes, excludes, profiles
100 return includes, excludes, profiles
101
101
102
102
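A short usage sketch of the format handled above; the profile name and paths are made up, and `ui` stands for a Mercurial ui object such as repo.ui. The expected sets follow directly from the parsing rules:

    raw = (
        b'%include base.sparse\n'
        b'[include]\n'
        b'lib/\n'
        b'[exclude]\n'
        b'lib/tests/\n'
    )
    includes, excludes, profiles = parseconfig(ui, raw, b'sparse')
    # includes == {b'lib/'}, excludes == {b'lib/tests/'}, profiles == {b'base.sparse'}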
103 # Exists as separate function to facilitate monkeypatching.
103 # Exists as separate function to facilitate monkeypatching.
104 def readprofile(repo, profile, changeid):
104 def readprofile(repo, profile, changeid):
105 """Resolve the raw content of a sparse profile file."""
105 """Resolve the raw content of a sparse profile file."""
106 # TODO add some kind of cache here because this incurs a manifest
106 # TODO add some kind of cache here because this incurs a manifest
107 # resolve and can be slow.
107 # resolve and can be slow.
108 return repo.filectx(profile, changeid=changeid).data()
108 return repo.filectx(profile, changeid=changeid).data()
109
109
110
110
111 def patternsforrev(repo, rev):
111 def patternsforrev(repo, rev):
112 """Obtain sparse checkout patterns for the given rev.
112 """Obtain sparse checkout patterns for the given rev.
113
113
114 Returns a tuple of iterables representing includes, excludes, and
114 Returns a tuple of iterables representing includes, excludes, and
115 patterns.
115 patterns.
116 """
116 """
117 # Feature isn't enabled. No-op.
117 # Feature isn't enabled. No-op.
118 if not enabled:
118 if not enabled:
119 return set(), set(), set()
119 return set(), set(), set()
120
120
121 raw = repo.vfs.tryread(b'sparse')
121 raw = repo.vfs.tryread(b'sparse')
122 if not raw:
122 if not raw:
123 return set(), set(), set()
123 return set(), set(), set()
124
124
125 if rev is None:
125 if rev is None:
126 raise error.Abort(
126 raise error.Abort(
127 _(b'cannot parse sparse patterns from working directory')
127 _(b'cannot parse sparse patterns from working directory')
128 )
128 )
129
129
130 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
130 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
131 ctx = repo[rev]
131 ctx = repo[rev]
132
132
133 if profiles:
133 if profiles:
134 visited = set()
134 visited = set()
135 while profiles:
135 while profiles:
136 profile = profiles.pop()
136 profile = profiles.pop()
137 if profile in visited:
137 if profile in visited:
138 continue
138 continue
139
139
140 visited.add(profile)
140 visited.add(profile)
141
141
142 try:
142 try:
143 raw = readprofile(repo, profile, rev)
143 raw = readprofile(repo, profile, rev)
144 except error.ManifestLookupError:
144 except error.ManifestLookupError:
145 msg = (
145 msg = (
146 b"warning: sparse profile '%s' not found "
146 b"warning: sparse profile '%s' not found "
147 b"in rev %s - ignoring it\n" % (profile, ctx)
147 b"in rev %s - ignoring it\n" % (profile, ctx)
148 )
148 )
149 # experimental config: sparse.missingwarning
149 # experimental config: sparse.missingwarning
150 if repo.ui.configbool(b'sparse', b'missingwarning'):
150 if repo.ui.configbool(b'sparse', b'missingwarning'):
151 repo.ui.warn(msg)
151 repo.ui.warn(msg)
152 else:
152 else:
153 repo.ui.debug(msg)
153 repo.ui.debug(msg)
154 continue
154 continue
155
155
156 pincludes, pexcludes, subprofs = parseconfig(
156 pincludes, pexcludes, subprofs = parseconfig(
157 repo.ui, raw, b'sparse'
157 repo.ui, raw, b'sparse'
158 )
158 )
159 includes.update(pincludes)
159 includes.update(pincludes)
160 excludes.update(pexcludes)
160 excludes.update(pexcludes)
161 profiles.update(subprofs)
161 profiles.update(subprofs)
162
162
163 profiles = visited
163 profiles = visited
164
164
165 if includes:
165 if includes:
166 includes.add(b'.hg*')
166 includes.add(b'.hg*')
167
167
168 return includes, excludes, profiles
168 return includes, excludes, profiles
169
169
170
170
171 def activeconfig(repo):
171 def activeconfig(repo):
172 """Determine the active sparse config rules.
172 """Determine the active sparse config rules.
173
173
174 Rules are constructed by reading the current sparse config and bringing in
174 Rules are constructed by reading the current sparse config and bringing in
175 referenced profiles from parents of the working directory.
175 referenced profiles from parents of the working directory.
176 """
176 """
177 revs = [
177 revs = [
178 repo.changelog.rev(node)
178 repo.changelog.rev(node)
179 for node in repo.dirstate.parents()
179 for node in repo.dirstate.parents()
180 if node != nullid
180 if node != nullid
181 ]
181 ]
182
182
183 allincludes = set()
183 allincludes = set()
184 allexcludes = set()
184 allexcludes = set()
185 allprofiles = set()
185 allprofiles = set()
186
186
187 for rev in revs:
187 for rev in revs:
188 includes, excludes, profiles = patternsforrev(repo, rev)
188 includes, excludes, profiles = patternsforrev(repo, rev)
189 allincludes |= includes
189 allincludes |= includes
190 allexcludes |= excludes
190 allexcludes |= excludes
191 allprofiles |= profiles
191 allprofiles |= profiles
192
192
193 return allincludes, allexcludes, allprofiles
193 return allincludes, allexcludes, allprofiles
194
194
195
195
196 def configsignature(repo, includetemp=True):
196 def configsignature(repo, includetemp=True):
197 """Obtain the signature string for the current sparse configuration.
197 """Obtain the signature string for the current sparse configuration.
198
198
199 This is used to construct a cache key for matchers.
199 This is used to construct a cache key for matchers.
200 """
200 """
201 cache = repo._sparsesignaturecache
201 cache = repo._sparsesignaturecache
202
202
203 signature = cache.get(b'signature')
203 signature = cache.get(b'signature')
204
204
205 if includetemp:
205 if includetemp:
206 tempsignature = cache.get(b'tempsignature')
206 tempsignature = cache.get(b'tempsignature')
207 else:
207 else:
208 tempsignature = b'0'
208 tempsignature = b'0'
209
209
210 if signature is None or (includetemp and tempsignature is None):
210 if signature is None or (includetemp and tempsignature is None):
211 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
211 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
212 cache[b'signature'] = signature
212 cache[b'signature'] = signature
213
213
214 if includetemp:
214 if includetemp:
215 raw = repo.vfs.tryread(b'tempsparse')
215 raw = repo.vfs.tryread(b'tempsparse')
216 tempsignature = hex(hashutil.sha1(raw).digest())
216 tempsignature = hex(hashutil.sha1(raw).digest())
217 cache[b'tempsignature'] = tempsignature
217 cache[b'tempsignature'] = tempsignature
218
218
219 return b'%s %s' % (signature, tempsignature)
219 return b'%s %s' % (signature, tempsignature)
220
220
221
221
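Ignoring the per-repo cache, the signature above is just two SHA-1 hex digests joined by a space; a rough standalone equivalent of the same computation might look like this:

    from mercurial.node import hex
    from mercurial.utils import hashutil

    def rough_signature(repo, includetemp=True):
        sig = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
        tempsig = b'0'
        if includetemp:
            tempsig = hex(hashutil.sha1(repo.vfs.tryread(b'tempsparse')).digest())
        return b'%s %s' % (sig, tempsig)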
222 def writeconfig(repo, includes, excludes, profiles):
222 def writeconfig(repo, includes, excludes, profiles):
223 """Write the sparse config file given a sparse configuration."""
223 """Write the sparse config file given a sparse configuration."""
224 with repo.vfs(b'sparse', b'wb') as fh:
224 with repo.vfs(b'sparse', b'wb') as fh:
225 for p in sorted(profiles):
225 for p in sorted(profiles):
226 fh.write(b'%%include %s\n' % p)
226 fh.write(b'%%include %s\n' % p)
227
227
228 if includes:
228 if includes:
229 fh.write(b'[include]\n')
229 fh.write(b'[include]\n')
230 for i in sorted(includes):
230 for i in sorted(includes):
231 fh.write(i)
231 fh.write(i)
232 fh.write(b'\n')
232 fh.write(b'\n')
233
233
234 if excludes:
234 if excludes:
235 fh.write(b'[exclude]\n')
235 fh.write(b'[exclude]\n')
236 for e in sorted(excludes):
236 for e in sorted(excludes):
237 fh.write(e)
237 fh.write(e)
238 fh.write(b'\n')
238 fh.write(b'\n')
239
239
240 repo._sparsesignaturecache.clear()
240 repo._sparsesignaturecache.clear()
241
241
242
242
243 def readtemporaryincludes(repo):
243 def readtemporaryincludes(repo):
244 raw = repo.vfs.tryread(b'tempsparse')
244 raw = repo.vfs.tryread(b'tempsparse')
245 if not raw:
245 if not raw:
246 return set()
246 return set()
247
247
248 return set(raw.split(b'\n'))
248 return set(raw.split(b'\n'))
249
249
250
250
251 def writetemporaryincludes(repo, includes):
251 def writetemporaryincludes(repo, includes):
252 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
252 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
253 repo._sparsesignaturecache.clear()
253 repo._sparsesignaturecache.clear()
254
254
255
255
256 def addtemporaryincludes(repo, additional):
256 def addtemporaryincludes(repo, additional):
257 includes = readtemporaryincludes(repo)
257 includes = readtemporaryincludes(repo)
258 for i in additional:
258 for i in additional:
259 includes.add(i)
259 includes.add(i)
260 writetemporaryincludes(repo, includes)
260 writetemporaryincludes(repo, includes)
261
261
262
262
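The 'tempsparse' file manipulated by the three helpers above is simply the sorted paths joined by newlines; a quick round-trip sketch with made-up paths:

    paths = {b'lib/extra.py', b'docs/api.txt'}
    raw = b'\n'.join(sorted(paths))         # what writetemporaryincludes() stores
    assert set(raw.split(b'\n')) == paths   # what readtemporaryincludes() reads back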
263 def prunetemporaryincludes(repo):
263 def prunetemporaryincludes(repo):
264 if not enabled or not repo.vfs.exists(b'tempsparse'):
264 if not enabled or not repo.vfs.exists(b'tempsparse'):
265 return
265 return
266
266
267 s = repo.status()
267 s = repo.status()
268 if s.modified or s.added or s.removed or s.deleted:
268 if s.modified or s.added or s.removed or s.deleted:
269 # Still have pending changes. Don't bother trying to prune.
269 # Still have pending changes. Don't bother trying to prune.
270 return
270 return
271
271
272 sparsematch = matcher(repo, includetemp=False)
272 sparsematch = matcher(repo, includetemp=False)
273 dirstate = repo.dirstate
273 dirstate = repo.dirstate
274 mresult = mergemod.mergeresult()
274 mresult = mergemod.mergeresult()
275 dropped = []
275 dropped = []
276 tempincludes = readtemporaryincludes(repo)
276 tempincludes = readtemporaryincludes(repo)
277 for file in tempincludes:
277 for file in tempincludes:
278 if file in dirstate and not sparsematch(file):
278 if file in dirstate and not sparsematch(file):
279 message = _(b'dropping temporarily included sparse files')
279 message = _(b'dropping temporarily included sparse files')
280 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
280 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
281 dropped.append(file)
281 dropped.append(file)
282
282
283 mergemod.applyupdates(
283 mergemod.applyupdates(
284 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
284 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
285 )
285 )
286
286
287 # Fix dirstate
287 # Fix dirstate
288 for file in dropped:
288 for file in dropped:
289 dirstate.drop(file)
289 dirstate.drop(file)
290
290
291 repo.vfs.unlink(b'tempsparse')
291 repo.vfs.unlink(b'tempsparse')
292 repo._sparsesignaturecache.clear()
292 repo._sparsesignaturecache.clear()
293 msg = _(
293 msg = _(
294 b'cleaned up %d temporarily added file(s) from the '
294 b'cleaned up %d temporarily added file(s) from the '
295 b'sparse checkout\n'
295 b'sparse checkout\n'
296 )
296 )
297 repo.ui.status(msg % len(tempincludes))
297 repo.ui.status(msg % len(tempincludes))
298
298
299
299
300 def forceincludematcher(matcher, includes):
300 def forceincludematcher(matcher, includes):
301 """Returns a matcher that returns true for any of the forced includes
301 """Returns a matcher that returns true for any of the forced includes
302 before testing against the actual matcher."""
302 before testing against the actual matcher."""
303 kindpats = [(b'path', include, b'') for include in includes]
303 kindpats = [(b'path', include, b'') for include in includes]
304 includematcher = matchmod.includematcher(b'', kindpats)
304 includematcher = matchmod.includematcher(b'', kindpats)
305 return matchmod.unionmatcher([includematcher, matcher])
305 return matchmod.unionmatcher([includematcher, matcher])
306
306
307
307
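A brief usage sketch of the helper above, assuming `base` is a hypothetical sparse matcher built the same way matcher() builds one; this mirrors how matcher() grafts the temporary includes onto its result:

    base = matchmod.match(repo.root, b'', [], include=[b'lib/**'])  # hypothetical sparse matcher
    forced = forceincludematcher(base, {b'docs/README'})
    # forced(b'docs/README') is True even though the base matcher excludes it,
    # and everything the base matcher accepted still matches.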
308 def matcher(repo, revs=None, includetemp=True):
308 def matcher(repo, revs=None, includetemp=True):
309 """Obtain a matcher for sparse working directories for the given revs.
309 """Obtain a matcher for sparse working directories for the given revs.
310
310
311 If multiple revisions are specified, the matcher is the union of all
311 If multiple revisions are specified, the matcher is the union of all
312 revs.
312 revs.
313
313
314 ``includetemp`` indicates whether to use the temporary sparse profile.
314 ``includetemp`` indicates whether to use the temporary sparse profile.
315 """
315 """
316 # If sparse isn't enabled, sparse matcher matches everything.
316 # If sparse isn't enabled, sparse matcher matches everything.
317 if not enabled:
317 if not enabled:
318 return matchmod.always()
318 return matchmod.always()
319
319
320 if not revs or revs == [None]:
320 if not revs or revs == [None]:
321 revs = [
321 revs = [
322 repo.changelog.rev(node)
322 repo.changelog.rev(node)
323 for node in repo.dirstate.parents()
323 for node in repo.dirstate.parents()
324 if node != nullid
324 if node != nullid
325 ]
325 ]
326
326
327 signature = configsignature(repo, includetemp=includetemp)
327 signature = configsignature(repo, includetemp=includetemp)
328
328
329 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
329 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
330
330
331 result = repo._sparsematchercache.get(key)
331 result = repo._sparsematchercache.get(key)
332 if result:
332 if result:
333 return result
333 return result
334
334
335 matchers = []
335 matchers = []
336 for rev in revs:
336 for rev in revs:
337 try:
337 try:
338 includes, excludes, profiles = patternsforrev(repo, rev)
338 includes, excludes, profiles = patternsforrev(repo, rev)
339
339
340 if includes or excludes:
340 if includes or excludes:
341 matcher = matchmod.match(
341 matcher = matchmod.match(
342 repo.root,
342 repo.root,
343 b'',
343 b'',
344 [],
344 [],
345 include=includes,
345 include=includes,
346 exclude=excludes,
346 exclude=excludes,
347 default=b'relpath',
347 default=b'relpath',
348 )
348 )
349 matchers.append(matcher)
349 matchers.append(matcher)
350 except IOError:
350 except IOError:
351 pass
351 pass
352
352
353 if not matchers:
353 if not matchers:
354 result = matchmod.always()
354 result = matchmod.always()
355 elif len(matchers) == 1:
355 elif len(matchers) == 1:
356 result = matchers[0]
356 result = matchers[0]
357 else:
357 else:
358 result = matchmod.unionmatcher(matchers)
358 result = matchmod.unionmatcher(matchers)
359
359
360 if includetemp:
360 if includetemp:
361 tempincludes = readtemporaryincludes(repo)
361 tempincludes = readtemporaryincludes(repo)
362 result = forceincludematcher(result, tempincludes)
362 result = forceincludematcher(result, tempincludes)
363
363
364 repo._sparsematchercache[key] = result
364 repo._sparsematchercache[key] = result
365
365
366 return result
366 return result
367
367
368
368
369 def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
369 def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
370 """Filter updates to only lay out files that match the sparse rules."""
370 """Filter updates to only lay out files that match the sparse rules."""
371 if not enabled:
371 if not enabled:
372 return
372 return
373
373
374 oldrevs = [pctx.rev() for pctx in wctx.parents()]
374 oldrevs = [pctx.rev() for pctx in wctx.parents()]
375 oldsparsematch = matcher(repo, oldrevs)
375 oldsparsematch = matcher(repo, oldrevs)
376
376
377 if oldsparsematch.always():
377 if oldsparsematch.always():
378 return
378 return
379
379
380 files = set()
380 files = set()
381 prunedactions = {}
381 prunedactions = {}
382
382
383 if branchmerge:
383 if branchmerge:
384 # If we're merging, use the wctx filter, since we're merging into
384 # If we're merging, use the wctx filter, since we're merging into
385 # the wctx.
385 # the wctx.
386 sparsematch = matcher(repo, [wctx.p1().rev()])
386 sparsematch = matcher(repo, [wctx.p1().rev()])
387 else:
387 else:
388 # If we're updating, use the target context's filter, since we're
388 # If we're updating, use the target context's filter, since we're
389 # moving to the target context.
389 # moving to the target context.
390 sparsematch = matcher(repo, [mctx.rev()])
390 sparsematch = matcher(repo, [mctx.rev()])
391
391
392 temporaryfiles = []
392 temporaryfiles = []
393 for file, action in mresult.filemap():
393 for file, action in mresult.filemap():
394 type, args, msg = action
394 type, args, msg = action
395 files.add(file)
395 files.add(file)
396 if sparsematch(file):
396 if sparsematch(file):
397 prunedactions[file] = action
397 prunedactions[file] = action
398 elif type == mergestatemod.ACTION_MERGE:
398 elif type == mergestatemod.ACTION_MERGE:
399 temporaryfiles.append(file)
399 temporaryfiles.append(file)
400 prunedactions[file] = action
400 prunedactions[file] = action
401 elif branchmerge:
401 elif branchmerge:
402 if type not in mergemod.mergeresult.NO_OP_ACTIONS:
402 if type not in mergestatemod.NO_OP_ACTIONS:
403 temporaryfiles.append(file)
403 temporaryfiles.append(file)
404 prunedactions[file] = action
404 prunedactions[file] = action
405 elif type == mergestatemod.ACTION_FORGET:
405 elif type == mergestatemod.ACTION_FORGET:
406 prunedactions[file] = action
406 prunedactions[file] = action
407 elif file in wctx:
407 elif file in wctx:
408 prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
408 prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
409
409
410 # in case of rename on one side, it is possible that f1 might not
410 # in case of rename on one side, it is possible that f1 might not
411 # be present in the sparse checkout; we should include it
411 # be present in the sparse checkout; we should include it
412 # TODO: should we do the same for f2?
412 # TODO: should we do the same for f2?
413 # exists as a separate check because the file can be in sparse and hence
413 # exists as a separate check because the file can be in sparse and hence
414 # if we try to fold this condition into the above `elif type == ACTION_MERGE`
414 # if we try to fold this condition into the above `elif type == ACTION_MERGE`
415 # it won't be triggered
415 # it won't be triggered
416 if branchmerge and type == mergestatemod.ACTION_MERGE:
416 if branchmerge and type == mergestatemod.ACTION_MERGE:
417 f1, f2, fa, move, anc = args
417 f1, f2, fa, move, anc = args
418 if not sparsematch(f1):
418 if not sparsematch(f1):
419 temporaryfiles.append(f1)
419 temporaryfiles.append(f1)
420
420
421 if len(temporaryfiles) > 0:
421 if len(temporaryfiles) > 0:
422 repo.ui.status(
422 repo.ui.status(
423 _(
423 _(
424 b'temporarily included %d file(s) in the sparse '
424 b'temporarily included %d file(s) in the sparse '
425 b'checkout for merging\n'
425 b'checkout for merging\n'
426 )
426 )
427 % len(temporaryfiles)
427 % len(temporaryfiles)
428 )
428 )
429 addtemporaryincludes(repo, temporaryfiles)
429 addtemporaryincludes(repo, temporaryfiles)
430
430
431 # Add the new files to the working copy so they can be merged, etc
431 # Add the new files to the working copy so they can be merged, etc
432 tmresult = mergemod.mergeresult()
432 tmresult = mergemod.mergeresult()
433 message = b'temporarily adding to sparse checkout'
433 message = b'temporarily adding to sparse checkout'
434 wctxmanifest = repo[None].manifest()
434 wctxmanifest = repo[None].manifest()
435 for file in temporaryfiles:
435 for file in temporaryfiles:
436 if file in wctxmanifest:
436 if file in wctxmanifest:
437 fctx = repo[None][file]
437 fctx = repo[None][file]
438 tmresult.addfile(
438 tmresult.addfile(
439 file,
439 file,
440 mergestatemod.ACTION_GET,
440 mergestatemod.ACTION_GET,
441 (fctx.flags(), False),
441 (fctx.flags(), False),
442 message,
442 message,
443 )
443 )
444
444
445 mergemod.applyupdates(
445 mergemod.applyupdates(
446 repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
446 repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
447 )
447 )
448
448
449 dirstate = repo.dirstate
449 dirstate = repo.dirstate
450 for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
450 for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
451 dirstate.normal(file)
451 dirstate.normal(file)
452
452
453 profiles = activeconfig(repo)[2]
453 profiles = activeconfig(repo)[2]
454 changedprofiles = profiles & files
454 changedprofiles = profiles & files
455 # If an active profile changed during the update, refresh the checkout.
455 # If an active profile changed during the update, refresh the checkout.
456 # Don't do this during a branch merge, since all incoming changes should
456 # Don't do this during a branch merge, since all incoming changes should
457 # have been handled by the temporary includes above.
457 # have been handled by the temporary includes above.
458 if changedprofiles and not branchmerge:
458 if changedprofiles and not branchmerge:
459 mf = mctx.manifest()
459 mf = mctx.manifest()
460 for file in mf:
460 for file in mf:
461 old = oldsparsematch(file)
461 old = oldsparsematch(file)
462 new = sparsematch(file)
462 new = sparsematch(file)
463 if not old and new:
463 if not old and new:
464 flags = mf.flags(file)
464 flags = mf.flags(file)
465 prunedactions[file] = (
465 prunedactions[file] = (
466 mergestatemod.ACTION_GET,
466 mergestatemod.ACTION_GET,
467 (flags, False),
467 (flags, False),
468 b'',
468 b'',
469 )
469 )
470 elif old and not new:
470 elif old and not new:
471 prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
471 prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
472
472
473 mresult.setactions(prunedactions)
473 mresult.setactions(prunedactions)
474
474
475
475
476 def refreshwdir(repo, origstatus, origsparsematch, force=False):
476 def refreshwdir(repo, origstatus, origsparsematch, force=False):
477 """Refreshes working directory by taking sparse config into account.
477 """Refreshes working directory by taking sparse config into account.
478
478
479 The old status and sparse matcher is compared against the current sparse
479 The old status and sparse matcher is compared against the current sparse
480 matcher.
480 matcher.
481
481
482 Will abort if a file with pending changes is being excluded or included
482 Will abort if a file with pending changes is being excluded or included
483 unless ``force`` is True.
483 unless ``force`` is True.
484 """
484 """
485 # Verify there are no pending changes
485 # Verify there are no pending changes
486 pending = set()
486 pending = set()
487 pending.update(origstatus.modified)
487 pending.update(origstatus.modified)
488 pending.update(origstatus.added)
488 pending.update(origstatus.added)
489 pending.update(origstatus.removed)
489 pending.update(origstatus.removed)
490 sparsematch = matcher(repo)
490 sparsematch = matcher(repo)
491 abort = False
491 abort = False
492
492
493 for f in pending:
493 for f in pending:
494 if not sparsematch(f):
494 if not sparsematch(f):
495 repo.ui.warn(_(b"pending changes to '%s'\n") % f)
495 repo.ui.warn(_(b"pending changes to '%s'\n") % f)
496 abort = not force
496 abort = not force
497
497
498 if abort:
498 if abort:
499 raise error.Abort(
499 raise error.Abort(
500 _(b'could not update sparseness due to pending changes')
500 _(b'could not update sparseness due to pending changes')
501 )
501 )
502
502
503 # Calculate merge result
503 # Calculate merge result
504 dirstate = repo.dirstate
504 dirstate = repo.dirstate
505 ctx = repo[b'.']
505 ctx = repo[b'.']
506 added = []
506 added = []
507 lookup = []
507 lookup = []
508 dropped = []
508 dropped = []
509 mf = ctx.manifest()
509 mf = ctx.manifest()
510 files = set(mf)
510 files = set(mf)
511 mresult = mergemod.mergeresult()
511 mresult = mergemod.mergeresult()
512
512
513 for file in files:
513 for file in files:
514 old = origsparsematch(file)
514 old = origsparsematch(file)
515 new = sparsematch(file)
515 new = sparsematch(file)
516 # Add files that are newly included, or that don't exist in
516 # Add files that are newly included, or that don't exist in
517 # the dirstate yet.
517 # the dirstate yet.
518 if (new and not old) or (old and new and not file in dirstate):
518 if (new and not old) or (old and new and not file in dirstate):
519 fl = mf.flags(file)
519 fl = mf.flags(file)
520 if repo.wvfs.exists(file):
520 if repo.wvfs.exists(file):
521 mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
521 mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
522 lookup.append(file)
522 lookup.append(file)
523 else:
523 else:
524 mresult.addfile(
524 mresult.addfile(
525 file, mergestatemod.ACTION_GET, (fl, False), b''
525 file, mergestatemod.ACTION_GET, (fl, False), b''
526 )
526 )
527 added.append(file)
527 added.append(file)
528 # Drop files that are newly excluded, or that still exist in
528 # Drop files that are newly excluded, or that still exist in
529 # the dirstate.
529 # the dirstate.
530 elif (old and not new) or (not old and not new and file in dirstate):
530 elif (old and not new) or (not old and not new and file in dirstate):
531 dropped.append(file)
531 dropped.append(file)
532 if file not in pending:
532 if file not in pending:
533 mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
533 mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
534
534
535 # Verify there are no pending changes in newly included files
535 # Verify there are no pending changes in newly included files
536 abort = False
536 abort = False
537 for file in lookup:
537 for file in lookup:
538 repo.ui.warn(_(b"pending changes to '%s'\n") % file)
538 repo.ui.warn(_(b"pending changes to '%s'\n") % file)
539 abort = not force
539 abort = not force
540 if abort:
540 if abort:
541 raise error.Abort(
541 raise error.Abort(
542 _(
542 _(
543 b'cannot change sparseness due to pending '
543 b'cannot change sparseness due to pending '
544 b'changes (delete the files or use '
544 b'changes (delete the files or use '
545 b'--force to bring them back dirty)'
545 b'--force to bring them back dirty)'
546 )
546 )
547 )
547 )
548
548
549 # Check for files that were only in the dirstate.
549 # Check for files that were only in the dirstate.
550 for file, state in pycompat.iteritems(dirstate):
550 for file, state in pycompat.iteritems(dirstate):
551 if not file in files:
551 if not file in files:
552 old = origsparsematch(file)
552 old = origsparsematch(file)
553 new = sparsematch(file)
553 new = sparsematch(file)
554 if old and not new:
554 if old and not new:
555 dropped.append(file)
555 dropped.append(file)
556
556
557 mergemod.applyupdates(
557 mergemod.applyupdates(
558 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
558 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
559 )
559 )
560
560
561 # Fix dirstate
561 # Fix dirstate
562 for file in added:
562 for file in added:
563 dirstate.normal(file)
563 dirstate.normal(file)
564
564
565 for file in dropped:
565 for file in dropped:
566 dirstate.drop(file)
566 dirstate.drop(file)
567
567
568 for file in lookup:
568 for file in lookup:
569 # File exists on disk, and we're bringing it back in an unknown state.
569 # File exists on disk, and we're bringing it back in an unknown state.
570 dirstate.normallookup(file)
570 dirstate.normallookup(file)
571
571
572 return added, dropped, lookup
572 return added, dropped, lookup
573
573
574
574
575 def aftercommit(repo, node):
575 def aftercommit(repo, node):
576 """Perform actions after a working directory commit."""
576 """Perform actions after a working directory commit."""
577 # This function is called unconditionally, even if sparse isn't
577 # This function is called unconditionally, even if sparse isn't
578 # enabled.
578 # enabled.
579 ctx = repo[node]
579 ctx = repo[node]
580
580
581 profiles = patternsforrev(repo, ctx.rev())[2]
581 profiles = patternsforrev(repo, ctx.rev())[2]
582
582
583 # profiles will only have data if sparse is enabled.
583 # profiles will only have data if sparse is enabled.
584 if profiles & set(ctx.files()):
584 if profiles & set(ctx.files()):
585 origstatus = repo.status()
585 origstatus = repo.status()
586 origsparsematch = matcher(repo)
586 origsparsematch = matcher(repo)
587 refreshwdir(repo, origstatus, origsparsematch, force=True)
587 refreshwdir(repo, origstatus, origsparsematch, force=True)
588
588
589 prunetemporaryincludes(repo)
589 prunetemporaryincludes(repo)
590
590
591
591
592 def _updateconfigandrefreshwdir(
592 def _updateconfigandrefreshwdir(
593 repo, includes, excludes, profiles, force=False, removing=False
593 repo, includes, excludes, profiles, force=False, removing=False
594 ):
594 ):
595 """Update the sparse config and working directory state."""
595 """Update the sparse config and working directory state."""
596 raw = repo.vfs.tryread(b'sparse')
596 raw = repo.vfs.tryread(b'sparse')
597 oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
597 oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
598
598
599 oldstatus = repo.status()
599 oldstatus = repo.status()
600 oldmatch = matcher(repo)
600 oldmatch = matcher(repo)
601 oldrequires = set(repo.requirements)
601 oldrequires = set(repo.requirements)
602
602
603 # TODO remove this try..except once the matcher integrates better
603 # TODO remove this try..except once the matcher integrates better
604 # with dirstate. We currently have to write the updated config
604 # with dirstate. We currently have to write the updated config
605 # because that will invalidate the matcher cache and force a
605 # because that will invalidate the matcher cache and force a
606 # re-read. We ideally want to update the cached matcher on the
606 # re-read. We ideally want to update the cached matcher on the
607 # repo instance then flush the new config to disk once wdir is
607 # repo instance then flush the new config to disk once wdir is
608 # updated. But this requires massive rework to matcher() and its
608 # updated. But this requires massive rework to matcher() and its
609 # consumers.
609 # consumers.
610
610
611 if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
611 if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
612 repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
612 repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
613 scmutil.writereporequirements(repo)
613 scmutil.writereporequirements(repo)
614 elif requirements.SPARSE_REQUIREMENT not in oldrequires:
614 elif requirements.SPARSE_REQUIREMENT not in oldrequires:
615 repo.requirements.add(requirements.SPARSE_REQUIREMENT)
615 repo.requirements.add(requirements.SPARSE_REQUIREMENT)
616 scmutil.writereporequirements(repo)
616 scmutil.writereporequirements(repo)
617
617
618 try:
618 try:
619 writeconfig(repo, includes, excludes, profiles)
619 writeconfig(repo, includes, excludes, profiles)
620 return refreshwdir(repo, oldstatus, oldmatch, force=force)
620 return refreshwdir(repo, oldstatus, oldmatch, force=force)
621 except Exception:
621 except Exception:
622 if repo.requirements != oldrequires:
622 if repo.requirements != oldrequires:
623 repo.requirements.clear()
623 repo.requirements.clear()
624 repo.requirements |= oldrequires
624 repo.requirements |= oldrequires
625 scmutil.writereporequirements(repo)
625 scmutil.writereporequirements(repo)
626 writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
626 writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
627 raise
627 raise
628
628
629
629
630 def clearrules(repo, force=False):
630 def clearrules(repo, force=False):
631 """Clears include/exclude rules from the sparse config.
631 """Clears include/exclude rules from the sparse config.
632
632
633 The remaining sparse config only has profiles, if defined. The working
633 The remaining sparse config only has profiles, if defined. The working
634 directory is refreshed, as needed.
634 directory is refreshed, as needed.
635 """
635 """
636 with repo.wlock():
636 with repo.wlock():
637 raw = repo.vfs.tryread(b'sparse')
637 raw = repo.vfs.tryread(b'sparse')
638 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
638 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
639
639
640 if not includes and not excludes:
640 if not includes and not excludes:
641 return
641 return
642
642
643 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
643 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
644
644
645
645
646 def importfromfiles(repo, opts, paths, force=False):
646 def importfromfiles(repo, opts, paths, force=False):
647 """Import sparse config rules from files.
647 """Import sparse config rules from files.
648
648
649 The updated sparse config is written out and the working directory
649 The updated sparse config is written out and the working directory
650 is refreshed, as needed.
650 is refreshed, as needed.
651 """
651 """
652 with repo.wlock():
652 with repo.wlock():
653 # read current configuration
653 # read current configuration
654 raw = repo.vfs.tryread(b'sparse')
654 raw = repo.vfs.tryread(b'sparse')
655 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
655 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
656 aincludes, aexcludes, aprofiles = activeconfig(repo)
656 aincludes, aexcludes, aprofiles = activeconfig(repo)
657
657
658 # Import rules on top; only take in rules that are not yet
658 # Import rules on top; only take in rules that are not yet
659 # part of the active rules.
659 # part of the active rules.
660 changed = False
660 changed = False
661 for p in paths:
661 for p in paths:
662 with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
662 with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
663 raw = fh.read()
663 raw = fh.read()
664
664
665 iincludes, iexcludes, iprofiles = parseconfig(
665 iincludes, iexcludes, iprofiles = parseconfig(
666 repo.ui, raw, b'sparse'
666 repo.ui, raw, b'sparse'
667 )
667 )
668 oldsize = len(includes) + len(excludes) + len(profiles)
668 oldsize = len(includes) + len(excludes) + len(profiles)
669 includes.update(iincludes - aincludes)
669 includes.update(iincludes - aincludes)
670 excludes.update(iexcludes - aexcludes)
670 excludes.update(iexcludes - aexcludes)
671 profiles.update(iprofiles - aprofiles)
671 profiles.update(iprofiles - aprofiles)
672 if len(includes) + len(excludes) + len(profiles) > oldsize:
672 if len(includes) + len(excludes) + len(profiles) > oldsize:
673 changed = True
673 changed = True
674
674
675 profilecount = includecount = excludecount = 0
675 profilecount = includecount = excludecount = 0
676 fcounts = (0, 0, 0)
676 fcounts = (0, 0, 0)
677
677
678 if changed:
678 if changed:
679 profilecount = len(profiles - aprofiles)
679 profilecount = len(profiles - aprofiles)
680 includecount = len(includes - aincludes)
680 includecount = len(includes - aincludes)
681 excludecount = len(excludes - aexcludes)
681 excludecount = len(excludes - aexcludes)
682
682
683 fcounts = map(
683 fcounts = map(
684 len,
684 len,
685 _updateconfigandrefreshwdir(
685 _updateconfigandrefreshwdir(
686 repo, includes, excludes, profiles, force=force
686 repo, includes, excludes, profiles, force=force
687 ),
687 ),
688 )
688 )
689
689
690 printchanges(
690 printchanges(
691 repo.ui, opts, profilecount, includecount, excludecount, *fcounts
691 repo.ui, opts, profilecount, includecount, excludecount, *fcounts
692 )
692 )
693
693
694
694
def updateconfig(
    repo,
    pats,
    opts,
    include=False,
    exclude=False,
    reset=False,
    delete=False,
    enableprofile=False,
    disableprofile=False,
    force=False,
    usereporootpaths=False,
):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(
            repo.ui, raw, b'sparse'
        )

        if reset:
            newinclude = set()
            newexclude = set()
            newprofiles = set()
        else:
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_(b'paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    ap = (kind + b':' if kind else b'') + pathutil.canonpath(
                        root, cwd, pat
                    )
                    abspats.append(ap)
                else:
                    abspats.append(kindpat)
            pats = abspats

        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        profilecount = len(newprofiles - oldprofiles) - len(
            oldprofiles - newprofiles
        )
        includecount = len(newinclude - oldinclude) - len(
            oldinclude - newinclude
        )
        excludecount = len(newexclude - oldexclude) - len(
            oldexclude - newexclude
        )

        fcounts = map(
            len,
            _updateconfigandrefreshwdir(
                repo,
                newinclude,
                newexclude,
                newprofiles,
                force=force,
                removing=reset,
            ),
        )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

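As a usage note, here is a minimal sketch of driving updateconfig() programmatically. It assumes the current directory is a repository root; the pattern list and the empty opts dict are illustrative, and only keyword arguments shown in the signature above are used.

    from mercurial import hg, sparse, ui as uimod

    u = uimod.ui.load()
    repo = hg.repository(u, b'.')  # assumes cwd is the repository root
    # Add two include rules interpreted relative to the repository root,
    # then let updateconfig() rewrite the sparse file and refresh the
    # working directory to match.
    sparse.updateconfig(
        repo,
        [b'src/', b'docs/README'],
        {},  # formatter options; empty means plain-text output
        include=True,
        usereporootpaths=True,
    )

Passing usereporootpaths=True skips the cwd-relative canonicalization branch, so the patterns are stored exactly as given.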
def printchanges(
    ui,
    opts,
    profilecount=0,
    includecount=0,
    excludecount=0,
    added=0,
    dropped=0,
    conflicting=0,
):
    """Print output summarizing sparse config changes."""
    with ui.formatter(b'sparse', opts) as fm:
        fm.startitem()
        fm.condwrite(
            ui.verbose,
            b'profiles_added',
            _(b'Profiles changed: %d\n'),
            profilecount,
        )
        fm.condwrite(
            ui.verbose,
            b'include_rules_added',
            _(b'Include rules changed: %d\n'),
            includecount,
        )
        fm.condwrite(
            ui.verbose,
            b'exclude_rules_added',
            _(b'Exclude rules changed: %d\n'),
            excludecount,
        )

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            fm.condwrite(
                ui.verbose, b'files_added', _(b'Files added: %d\n'), added
            )
            fm.condwrite(
                ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
            )
            fm.condwrite(
                ui.verbose,
                b'files_conflicting',
                _(b'Files conflicting: %d\n'),
                conflicting,
            )
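Finally, a small hedged sketch of what printchanges() emits: the counters only appear in verbose mode, and the per-file counters are additionally suppressed for the plain formatter because mergemod.applyupdates has already reported those files. The counter values below are made up.

    from mercurial import sparse, ui as uimod

    u = uimod.ui.load()
    u.setconfig(b'ui', b'verbose', b'true')
    # With an empty opts dict the plain formatter is used, so only the first
    # three counters are written; files_added/dropped/conflicting are skipped
    # by the fm.isplain() check above.
    sparse.printchanges(u, {}, profilecount=1, includecount=2, excludecount=0)
    # Verbose output:
    #   Profiles changed: 1
    #   Include rules changed: 2
    #   Exclude rules changed: 0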