##// END OF EJS Templates
merge-actions: make merge action a full featured object...
marmoute -
r49560:9bc86adf default
parent child Browse files
Show More
@@ -1,1866 +1,1866 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 url as urlmod,
40 url as urlmod,
41 util,
41 util,
42 )
42 )
43
43
44 from mercurial.upgrade_utils import (
44 from mercurial.upgrade_utils import (
45 actions as upgrade_actions,
45 actions as upgrade_actions,
46 )
46 )
47
47
48 from . import (
48 from . import (
49 lfcommands,
49 lfcommands,
50 lfutil,
50 lfutil,
51 storefactory,
51 storefactory,
52 )
52 )
53
53
54 ACTION_ADD = mergestatemod.ACTION_ADD
54 ACTION_ADD = mergestatemod.ACTION_ADD
55 ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
55 ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
56 ACTION_GET = mergestatemod.ACTION_GET
56 ACTION_GET = mergestatemod.ACTION_GET
57 ACTION_KEEP = mergestatemod.ACTION_KEEP
57 ACTION_KEEP = mergestatemod.ACTION_KEEP
58 ACTION_REMOVE = mergestatemod.ACTION_REMOVE
58 ACTION_REMOVE = mergestatemod.ACTION_REMOVE
59
59
60 eh = exthelper.exthelper()
60 eh = exthelper.exthelper()
61
61
62 lfstatus = lfutil.lfstatus
62 lfstatus = lfutil.lfstatus
63
63
64 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
64 MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
65
65
66 # -- Utility functions: commonly/repeatedly needed functionality ---------------
66 # -- Utility functions: commonly/repeatedly needed functionality ---------------
67
67
68
68
69 def composelargefilematcher(match, manifest):
69 def composelargefilematcher(match, manifest):
70 """create a matcher that matches only the largefiles in the original
70 """create a matcher that matches only the largefiles in the original
71 matcher"""
71 matcher"""
72 m = copy.copy(match)
72 m = copy.copy(match)
73 lfile = lambda f: lfutil.standin(f) in manifest
73 lfile = lambda f: lfutil.standin(f) in manifest
74 m._files = [lf for lf in m._files if lfile(lf)]
74 m._files = [lf for lf in m._files if lfile(lf)]
75 m._fileset = set(m._files)
75 m._fileset = set(m._files)
76 m.always = lambda: False
76 m.always = lambda: False
77 origmatchfn = m.matchfn
77 origmatchfn = m.matchfn
78 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
78 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
79 return m
79 return m
80
80
81
81
82 def composenormalfilematcher(match, manifest, exclude=None):
82 def composenormalfilematcher(match, manifest, exclude=None):
83 excluded = set()
83 excluded = set()
84 if exclude is not None:
84 if exclude is not None:
85 excluded.update(exclude)
85 excluded.update(exclude)
86
86
87 m = copy.copy(match)
87 m = copy.copy(match)
88 notlfile = lambda f: not (
88 notlfile = lambda f: not (
89 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
89 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
90 )
90 )
91 m._files = [lf for lf in m._files if notlfile(lf)]
91 m._files = [lf for lf in m._files if notlfile(lf)]
92 m._fileset = set(m._files)
92 m._fileset = set(m._files)
93 m.always = lambda: False
93 m.always = lambda: False
94 origmatchfn = m.matchfn
94 origmatchfn = m.matchfn
95 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
95 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
96 return m
96 return m
97
97
98
98
99 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
99 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
100 large = opts.get('large')
100 large = opts.get('large')
101 lfsize = lfutil.getminsize(
101 lfsize = lfutil.getminsize(
102 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
102 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
103 )
103 )
104
104
105 lfmatcher = None
105 lfmatcher = None
106 if lfutil.islfilesrepo(repo):
106 if lfutil.islfilesrepo(repo):
107 lfpats = ui.configlist(lfutil.longname, b'patterns')
107 lfpats = ui.configlist(lfutil.longname, b'patterns')
108 if lfpats:
108 if lfpats:
109 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
109 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
110
110
111 lfnames = []
111 lfnames = []
112 m = matcher
112 m = matcher
113
113
114 wctx = repo[None]
114 wctx = repo[None]
115 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
115 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
116 exact = m.exact(f)
116 exact = m.exact(f)
117 lfile = lfutil.standin(f) in wctx
117 lfile = lfutil.standin(f) in wctx
118 nfile = f in wctx
118 nfile = f in wctx
119 exists = lfile or nfile
119 exists = lfile or nfile
120
120
121 # Don't warn the user when they attempt to add a normal tracked file.
121 # Don't warn the user when they attempt to add a normal tracked file.
122 # The normal add code will do that for us.
122 # The normal add code will do that for us.
123 if exact and exists:
123 if exact and exists:
124 if lfile:
124 if lfile:
125 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
125 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
126 continue
126 continue
127
127
128 if (exact or not exists) and not lfutil.isstandin(f):
128 if (exact or not exists) and not lfutil.isstandin(f):
129 # In case the file was removed previously, but not committed
129 # In case the file was removed previously, but not committed
130 # (issue3507)
130 # (issue3507)
131 if not repo.wvfs.exists(f):
131 if not repo.wvfs.exists(f):
132 continue
132 continue
133
133
134 abovemin = (
134 abovemin = (
135 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
135 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
136 )
136 )
137 if large or abovemin or (lfmatcher and lfmatcher(f)):
137 if large or abovemin or (lfmatcher and lfmatcher(f)):
138 lfnames.append(f)
138 lfnames.append(f)
139 if ui.verbose or not exact:
139 if ui.verbose or not exact:
140 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
140 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
141
141
142 bad = []
142 bad = []
143
143
144 # Need to lock, otherwise there could be a race condition between
144 # Need to lock, otherwise there could be a race condition between
145 # when standins are created and added to the repo.
145 # when standins are created and added to the repo.
146 with repo.wlock():
146 with repo.wlock():
147 if not opts.get('dry_run'):
147 if not opts.get('dry_run'):
148 standins = []
148 standins = []
149 lfdirstate = lfutil.openlfdirstate(ui, repo)
149 lfdirstate = lfutil.openlfdirstate(ui, repo)
150 for f in lfnames:
150 for f in lfnames:
151 standinname = lfutil.standin(f)
151 standinname = lfutil.standin(f)
152 lfutil.writestandin(
152 lfutil.writestandin(
153 repo,
153 repo,
154 standinname,
154 standinname,
155 hash=b'',
155 hash=b'',
156 executable=lfutil.getexecutable(repo.wjoin(f)),
156 executable=lfutil.getexecutable(repo.wjoin(f)),
157 )
157 )
158 standins.append(standinname)
158 standins.append(standinname)
159 lfdirstate.set_tracked(f)
159 lfdirstate.set_tracked(f)
160 lfdirstate.write(repo.currenttransaction())
160 lfdirstate.write(repo.currenttransaction())
161 bad += [
161 bad += [
162 lfutil.splitstandin(f)
162 lfutil.splitstandin(f)
163 for f in repo[None].add(standins)
163 for f in repo[None].add(standins)
164 if f in m.files()
164 if f in m.files()
165 ]
165 ]
166
166
167 added = [f for f in lfnames if f not in bad]
167 added = [f for f in lfnames if f not in bad]
168 return added, bad
168 return added, bad
169
169
170
170
171 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
171 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
172 after = opts.get('after')
172 after = opts.get('after')
173 m = composelargefilematcher(matcher, repo[None].manifest())
173 m = composelargefilematcher(matcher, repo[None].manifest())
174 with lfstatus(repo):
174 with lfstatus(repo):
175 s = repo.status(match=m, clean=not isaddremove)
175 s = repo.status(match=m, clean=not isaddremove)
176 manifest = repo[None].manifest()
176 manifest = repo[None].manifest()
177 modified, added, deleted, clean = [
177 modified, added, deleted, clean = [
178 [f for f in list if lfutil.standin(f) in manifest]
178 [f for f in list if lfutil.standin(f) in manifest]
179 for list in (s.modified, s.added, s.deleted, s.clean)
179 for list in (s.modified, s.added, s.deleted, s.clean)
180 ]
180 ]
181
181
182 def warn(files, msg):
182 def warn(files, msg):
183 for f in files:
183 for f in files:
184 ui.warn(msg % uipathfn(f))
184 ui.warn(msg % uipathfn(f))
185 return int(len(files) > 0)
185 return int(len(files) > 0)
186
186
187 if after:
187 if after:
188 remove = deleted
188 remove = deleted
189 result = warn(
189 result = warn(
190 modified + added + clean, _(b'not removing %s: file still exists\n')
190 modified + added + clean, _(b'not removing %s: file still exists\n')
191 )
191 )
192 else:
192 else:
193 remove = deleted + clean
193 remove = deleted + clean
194 result = warn(
194 result = warn(
195 modified,
195 modified,
196 _(
196 _(
197 b'not removing %s: file is modified (use -f'
197 b'not removing %s: file is modified (use -f'
198 b' to force removal)\n'
198 b' to force removal)\n'
199 ),
199 ),
200 )
200 )
201 result = (
201 result = (
202 warn(
202 warn(
203 added,
203 added,
204 _(
204 _(
205 b'not removing %s: file has been marked for add'
205 b'not removing %s: file has been marked for add'
206 b' (use forget to undo)\n'
206 b' (use forget to undo)\n'
207 ),
207 ),
208 )
208 )
209 or result
209 or result
210 )
210 )
211
211
212 # Need to lock because standin files are deleted then removed from the
212 # Need to lock because standin files are deleted then removed from the
213 # repository and we could race in-between.
213 # repository and we could race in-between.
214 with repo.wlock():
214 with repo.wlock():
215 lfdirstate = lfutil.openlfdirstate(ui, repo)
215 lfdirstate = lfutil.openlfdirstate(ui, repo)
216 for f in sorted(remove):
216 for f in sorted(remove):
217 if ui.verbose or not m.exact(f):
217 if ui.verbose or not m.exact(f):
218 ui.status(_(b'removing %s\n') % uipathfn(f))
218 ui.status(_(b'removing %s\n') % uipathfn(f))
219
219
220 if not dryrun:
220 if not dryrun:
221 if not after:
221 if not after:
222 repo.wvfs.unlinkpath(f, ignoremissing=True)
222 repo.wvfs.unlinkpath(f, ignoremissing=True)
223
223
224 if dryrun:
224 if dryrun:
225 return result
225 return result
226
226
227 remove = [lfutil.standin(f) for f in remove]
227 remove = [lfutil.standin(f) for f in remove]
228 # If this is being called by addremove, let the original addremove
228 # If this is being called by addremove, let the original addremove
229 # function handle this.
229 # function handle this.
230 if not isaddremove:
230 if not isaddremove:
231 for f in remove:
231 for f in remove:
232 repo.wvfs.unlinkpath(f, ignoremissing=True)
232 repo.wvfs.unlinkpath(f, ignoremissing=True)
233 repo[None].forget(remove)
233 repo[None].forget(remove)
234
234
235 for f in remove:
235 for f in remove:
236 lfdirstate.set_untracked(lfutil.splitstandin(f))
236 lfdirstate.set_untracked(lfutil.splitstandin(f))
237
237
238 lfdirstate.write(repo.currenttransaction())
238 lfdirstate.write(repo.currenttransaction())
239
239
240 return result
240 return result
241
241
242
242
243 # For overriding mercurial.hgweb.webcommands so that largefiles will
243 # For overriding mercurial.hgweb.webcommands so that largefiles will
244 # appear at their right place in the manifests.
244 # appear at their right place in the manifests.
245 @eh.wrapfunction(webcommands, b'decodepath')
245 @eh.wrapfunction(webcommands, b'decodepath')
246 def decodepath(orig, path):
246 def decodepath(orig, path):
247 return lfutil.splitstandin(path) or path
247 return lfutil.splitstandin(path) or path
248
248
249
249
250 # -- Wrappers: modify existing commands --------------------------------
250 # -- Wrappers: modify existing commands --------------------------------
251
251
252
252
253 @eh.wrapcommand(
253 @eh.wrapcommand(
254 b'add',
254 b'add',
255 opts=[
255 opts=[
256 (b'', b'large', None, _(b'add as largefile')),
256 (b'', b'large', None, _(b'add as largefile')),
257 (b'', b'normal', None, _(b'add as normal file')),
257 (b'', b'normal', None, _(b'add as normal file')),
258 (
258 (
259 b'',
259 b'',
260 b'lfsize',
260 b'lfsize',
261 b'',
261 b'',
262 _(
262 _(
263 b'add all files above this size (in megabytes) '
263 b'add all files above this size (in megabytes) '
264 b'as largefiles (default: 10)'
264 b'as largefiles (default: 10)'
265 ),
265 ),
266 ),
266 ),
267 ],
267 ],
268 )
268 )
269 def overrideadd(orig, ui, repo, *pats, **opts):
269 def overrideadd(orig, ui, repo, *pats, **opts):
270 if opts.get('normal') and opts.get('large'):
270 if opts.get('normal') and opts.get('large'):
271 raise error.Abort(_(b'--normal cannot be used with --large'))
271 raise error.Abort(_(b'--normal cannot be used with --large'))
272 return orig(ui, repo, *pats, **opts)
272 return orig(ui, repo, *pats, **opts)
273
273
274
274
275 @eh.wrapfunction(cmdutil, b'add')
275 @eh.wrapfunction(cmdutil, b'add')
276 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
276 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
277 # The --normal flag short circuits this override
277 # The --normal flag short circuits this override
278 if opts.get('normal'):
278 if opts.get('normal'):
279 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
279 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
280
280
281 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
281 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
282 normalmatcher = composenormalfilematcher(
282 normalmatcher = composenormalfilematcher(
283 matcher, repo[None].manifest(), ladded
283 matcher, repo[None].manifest(), ladded
284 )
284 )
285 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
285 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
286
286
287 bad.extend(f for f in lbad)
287 bad.extend(f for f in lbad)
288 return bad
288 return bad
289
289
290
290
291 @eh.wrapfunction(cmdutil, b'remove')
291 @eh.wrapfunction(cmdutil, b'remove')
292 def cmdutilremove(
292 def cmdutilremove(
293 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
293 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
294 ):
294 ):
295 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
295 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
296 result = orig(
296 result = orig(
297 ui,
297 ui,
298 repo,
298 repo,
299 normalmatcher,
299 normalmatcher,
300 prefix,
300 prefix,
301 uipathfn,
301 uipathfn,
302 after,
302 after,
303 force,
303 force,
304 subrepos,
304 subrepos,
305 dryrun,
305 dryrun,
306 )
306 )
307 return (
307 return (
308 removelargefiles(
308 removelargefiles(
309 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
309 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
310 )
310 )
311 or result
311 or result
312 )
312 )
313
313
314
314
315 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
315 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
316 def overridestatusfn(orig, repo, rev2, **opts):
316 def overridestatusfn(orig, repo, rev2, **opts):
317 with lfstatus(repo._repo):
317 with lfstatus(repo._repo):
318 return orig(repo, rev2, **opts)
318 return orig(repo, rev2, **opts)
319
319
320
320
321 @eh.wrapcommand(b'status')
321 @eh.wrapcommand(b'status')
322 def overridestatus(orig, ui, repo, *pats, **opts):
322 def overridestatus(orig, ui, repo, *pats, **opts):
323 with lfstatus(repo):
323 with lfstatus(repo):
324 return orig(ui, repo, *pats, **opts)
324 return orig(ui, repo, *pats, **opts)
325
325
326
326
327 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
327 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
328 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
328 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
329 with lfstatus(repo._repo):
329 with lfstatus(repo._repo):
330 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
330 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
331
331
332
332
333 @eh.wrapcommand(b'log')
333 @eh.wrapcommand(b'log')
334 def overridelog(orig, ui, repo, *pats, **opts):
334 def overridelog(orig, ui, repo, *pats, **opts):
335 def overridematchandpats(
335 def overridematchandpats(
336 orig,
336 orig,
337 ctx,
337 ctx,
338 pats=(),
338 pats=(),
339 opts=None,
339 opts=None,
340 globbed=False,
340 globbed=False,
341 default=b'relpath',
341 default=b'relpath',
342 badfn=None,
342 badfn=None,
343 ):
343 ):
344 """Matcher that merges root directory with .hglf, suitable for log.
344 """Matcher that merges root directory with .hglf, suitable for log.
345 It is still possible to match .hglf directly.
345 It is still possible to match .hglf directly.
346 For any listed files run log on the standin too.
346 For any listed files run log on the standin too.
347 matchfn tries both the given filename and with .hglf stripped.
347 matchfn tries both the given filename and with .hglf stripped.
348 """
348 """
349 if opts is None:
349 if opts is None:
350 opts = {}
350 opts = {}
351 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
351 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
352 m, p = copy.copy(matchandpats)
352 m, p = copy.copy(matchandpats)
353
353
354 if m.always():
354 if m.always():
355 # We want to match everything anyway, so there's no benefit trying
355 # We want to match everything anyway, so there's no benefit trying
356 # to add standins.
356 # to add standins.
357 return matchandpats
357 return matchandpats
358
358
359 pats = set(p)
359 pats = set(p)
360
360
361 def fixpats(pat, tostandin=lfutil.standin):
361 def fixpats(pat, tostandin=lfutil.standin):
362 if pat.startswith(b'set:'):
362 if pat.startswith(b'set:'):
363 return pat
363 return pat
364
364
365 kindpat = matchmod._patsplit(pat, None)
365 kindpat = matchmod._patsplit(pat, None)
366
366
367 if kindpat[0] is not None:
367 if kindpat[0] is not None:
368 return kindpat[0] + b':' + tostandin(kindpat[1])
368 return kindpat[0] + b':' + tostandin(kindpat[1])
369 return tostandin(kindpat[1])
369 return tostandin(kindpat[1])
370
370
371 cwd = repo.getcwd()
371 cwd = repo.getcwd()
372 if cwd:
372 if cwd:
373 hglf = lfutil.shortname
373 hglf = lfutil.shortname
374 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
374 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
375
375
376 def tostandin(f):
376 def tostandin(f):
377 # The file may already be a standin, so truncate the back
377 # The file may already be a standin, so truncate the back
378 # prefix and test before mangling it. This avoids turning
378 # prefix and test before mangling it. This avoids turning
379 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
379 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
380 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
380 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
381 return f
381 return f
382
382
383 # An absolute path is from outside the repo, so truncate the
383 # An absolute path is from outside the repo, so truncate the
384 # path to the root before building the standin. Otherwise cwd
384 # path to the root before building the standin. Otherwise cwd
385 # is somewhere in the repo, relative to root, and needs to be
385 # is somewhere in the repo, relative to root, and needs to be
386 # prepended before building the standin.
386 # prepended before building the standin.
387 if os.path.isabs(cwd):
387 if os.path.isabs(cwd):
388 f = f[len(back) :]
388 f = f[len(back) :]
389 else:
389 else:
390 f = cwd + b'/' + f
390 f = cwd + b'/' + f
391 return back + lfutil.standin(f)
391 return back + lfutil.standin(f)
392
392
393 else:
393 else:
394
394
395 def tostandin(f):
395 def tostandin(f):
396 if lfutil.isstandin(f):
396 if lfutil.isstandin(f):
397 return f
397 return f
398 return lfutil.standin(f)
398 return lfutil.standin(f)
399
399
400 pats.update(fixpats(f, tostandin) for f in p)
400 pats.update(fixpats(f, tostandin) for f in p)
401
401
402 for i in range(0, len(m._files)):
402 for i in range(0, len(m._files)):
403 # Don't add '.hglf' to m.files, since that is already covered by '.'
403 # Don't add '.hglf' to m.files, since that is already covered by '.'
404 if m._files[i] == b'.':
404 if m._files[i] == b'.':
405 continue
405 continue
406 standin = lfutil.standin(m._files[i])
406 standin = lfutil.standin(m._files[i])
407 # If the "standin" is a directory, append instead of replace to
407 # If the "standin" is a directory, append instead of replace to
408 # support naming a directory on the command line with only
408 # support naming a directory on the command line with only
409 # largefiles. The original directory is kept to support normal
409 # largefiles. The original directory is kept to support normal
410 # files.
410 # files.
411 if standin in ctx:
411 if standin in ctx:
412 m._files[i] = standin
412 m._files[i] = standin
413 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
413 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
414 m._files.append(standin)
414 m._files.append(standin)
415
415
416 m._fileset = set(m._files)
416 m._fileset = set(m._files)
417 m.always = lambda: False
417 m.always = lambda: False
418 origmatchfn = m.matchfn
418 origmatchfn = m.matchfn
419
419
420 def lfmatchfn(f):
420 def lfmatchfn(f):
421 lf = lfutil.splitstandin(f)
421 lf = lfutil.splitstandin(f)
422 if lf is not None and origmatchfn(lf):
422 if lf is not None and origmatchfn(lf):
423 return True
423 return True
424 r = origmatchfn(f)
424 r = origmatchfn(f)
425 return r
425 return r
426
426
427 m.matchfn = lfmatchfn
427 m.matchfn = lfmatchfn
428
428
429 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
429 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
430 return m, pats
430 return m, pats
431
431
432 # For hg log --patch, the match object is used in two different senses:
432 # For hg log --patch, the match object is used in two different senses:
433 # (1) to determine what revisions should be printed out, and
433 # (1) to determine what revisions should be printed out, and
434 # (2) to determine what files to print out diffs for.
434 # (2) to determine what files to print out diffs for.
435 # The magic matchandpats override should be used for case (1) but not for
435 # The magic matchandpats override should be used for case (1) but not for
436 # case (2).
436 # case (2).
437 oldmatchandpats = scmutil.matchandpats
437 oldmatchandpats = scmutil.matchandpats
438
438
439 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
439 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
440 wctx = repo[None]
440 wctx = repo[None]
441 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
441 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
442 return lambda ctx: match
442 return lambda ctx: match
443
443
444 wrappedmatchandpats = extensions.wrappedfunction(
444 wrappedmatchandpats = extensions.wrappedfunction(
445 scmutil, b'matchandpats', overridematchandpats
445 scmutil, b'matchandpats', overridematchandpats
446 )
446 )
447 wrappedmakefilematcher = extensions.wrappedfunction(
447 wrappedmakefilematcher = extensions.wrappedfunction(
448 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
448 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
449 )
449 )
450 with wrappedmatchandpats, wrappedmakefilematcher:
450 with wrappedmatchandpats, wrappedmakefilematcher:
451 return orig(ui, repo, *pats, **opts)
451 return orig(ui, repo, *pats, **opts)
452
452
453
453
454 @eh.wrapcommand(
454 @eh.wrapcommand(
455 b'verify',
455 b'verify',
456 opts=[
456 opts=[
457 (
457 (
458 b'',
458 b'',
459 b'large',
459 b'large',
460 None,
460 None,
461 _(b'verify that all largefiles in current revision exists'),
461 _(b'verify that all largefiles in current revision exists'),
462 ),
462 ),
463 (
463 (
464 b'',
464 b'',
465 b'lfa',
465 b'lfa',
466 None,
466 None,
467 _(b'verify largefiles in all revisions, not just current'),
467 _(b'verify largefiles in all revisions, not just current'),
468 ),
468 ),
469 (
469 (
470 b'',
470 b'',
471 b'lfc',
471 b'lfc',
472 None,
472 None,
473 _(b'verify local largefile contents, not just existence'),
473 _(b'verify local largefile contents, not just existence'),
474 ),
474 ),
475 ],
475 ],
476 )
476 )
477 def overrideverify(orig, ui, repo, *pats, **opts):
477 def overrideverify(orig, ui, repo, *pats, **opts):
478 large = opts.pop('large', False)
478 large = opts.pop('large', False)
479 all = opts.pop('lfa', False)
479 all = opts.pop('lfa', False)
480 contents = opts.pop('lfc', False)
480 contents = opts.pop('lfc', False)
481
481
482 result = orig(ui, repo, *pats, **opts)
482 result = orig(ui, repo, *pats, **opts)
483 if large or all or contents:
483 if large or all or contents:
484 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
484 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
485 return result
485 return result
486
486
487
487
488 @eh.wrapcommand(
488 @eh.wrapcommand(
489 b'debugstate',
489 b'debugstate',
490 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
490 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
491 )
491 )
492 def overridedebugstate(orig, ui, repo, *pats, **opts):
492 def overridedebugstate(orig, ui, repo, *pats, **opts):
493 large = opts.pop('large', False)
493 large = opts.pop('large', False)
494 if large:
494 if large:
495
495
496 class fakerepo(object):
496 class fakerepo(object):
497 dirstate = lfutil.openlfdirstate(ui, repo)
497 dirstate = lfutil.openlfdirstate(ui, repo)
498
498
499 orig(ui, fakerepo, *pats, **opts)
499 orig(ui, fakerepo, *pats, **opts)
500 else:
500 else:
501 orig(ui, repo, *pats, **opts)
501 orig(ui, repo, *pats, **opts)
502
502
503
503
504 # Before starting the manifest merge, merge.updates will call
504 # Before starting the manifest merge, merge.updates will call
505 # _checkunknownfile to check if there are any files in the merged-in
505 # _checkunknownfile to check if there are any files in the merged-in
506 # changeset that collide with unknown files in the working copy.
506 # changeset that collide with unknown files in the working copy.
507 #
507 #
508 # The largefiles are seen as unknown, so this prevents us from merging
508 # The largefiles are seen as unknown, so this prevents us from merging
509 # in a file 'foo' if we already have a largefile with the same name.
509 # in a file 'foo' if we already have a largefile with the same name.
510 #
510 #
511 # The overridden function filters the unknown files by removing any
511 # The overridden function filters the unknown files by removing any
512 # largefiles. This makes the merge proceed and we can then handle this
512 # largefiles. This makes the merge proceed and we can then handle this
513 # case further in the overridden calculateupdates function below.
513 # case further in the overridden calculateupdates function below.
514 @eh.wrapfunction(merge, b'_checkunknownfile')
514 @eh.wrapfunction(merge, b'_checkunknownfile')
515 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
515 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
516 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
516 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
517 return False
517 return False
518 return origfn(repo, wctx, mctx, f, f2)
518 return origfn(repo, wctx, mctx, f, f2)
519
519
520
520
521 # The manifest merge handles conflicts on the manifest level. We want
521 # The manifest merge handles conflicts on the manifest level. We want
522 # to handle changes in largefile-ness of files at this level too.
522 # to handle changes in largefile-ness of files at this level too.
523 #
523 #
524 # The strategy is to run the original calculateupdates and then process
524 # The strategy is to run the original calculateupdates and then process
525 # the action list it outputs. There are two cases we need to deal with:
525 # the action list it outputs. There are two cases we need to deal with:
526 #
526 #
527 # 1. Normal file in p1, largefile in p2. Here the largefile is
527 # 1. Normal file in p1, largefile in p2. Here the largefile is
528 # detected via its standin file, which will enter the working copy
528 # detected via its standin file, which will enter the working copy
529 # with a "get" action. It is not "merge" since the standin is all
529 # with a "get" action. It is not "merge" since the standin is all
530 # Mercurial is concerned with at this level -- the link to the
530 # Mercurial is concerned with at this level -- the link to the
531 # existing normal file is not relevant here.
531 # existing normal file is not relevant here.
532 #
532 #
533 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
533 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
534 # since the largefile will be present in the working copy and
534 # since the largefile will be present in the working copy and
535 # different from the normal file in p2. Mercurial therefore
535 # different from the normal file in p2. Mercurial therefore
536 # triggers a merge action.
536 # triggers a merge action.
537 #
537 #
538 # In both cases, we prompt the user and emit new actions to either
538 # In both cases, we prompt the user and emit new actions to either
539 # remove the standin (if the normal file was kept) or to remove the
539 # remove the standin (if the normal file was kept) or to remove the
540 # normal file and get the standin (if the largefile was kept). The
540 # normal file and get the standin (if the largefile was kept). The
541 # default prompt answer is to use the largefile version since it was
541 # default prompt answer is to use the largefile version since it was
542 # presumably changed on purpose.
542 # presumably changed on purpose.
543 #
543 #
544 # Finally, the merge.applyupdates function will then take care of
544 # Finally, the merge.applyupdates function will then take care of
545 # writing the files into the working copy and lfcommands.updatelfiles
545 # writing the files into the working copy and lfcommands.updatelfiles
546 # will update the largefiles.
546 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process the merge action list to resolve largefile vs.
    normal-file conflicts.

    Runs the wrapped ``calculateupdates``, then, for every path that is
    a largefile on one side and a normal file on the other (see the case
    analysis in the comment above this function), prompts the user and
    rewrites the actions so that either the standin or the normal file
    wins.  Returns the (possibly modified) merge result.
    """
    # force without branchmerge is a plain overwrite: no conflict
    # resolution is needed, so the original actions are kept untouched.
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Collect the largefiles involved: either a standin appears in the
    # result and its largefile is in p1 (case 1), or the plain file
    # appears while p1 tracks its standin (case 2).
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # (action, action args, message) for the largefile and its standin;
        # (None, None, None) when the file has no recorded action.
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                # Rewrite the deleted/changed args into the (flags, move)
                # shape that an ACTION_GET expects.
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                # Same args rewrite as above, for the largefile side.
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
656
656
657
657
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record largefile "mark removed" actions in both dirstates.

    Largefiles flagged MERGE_ACTION_LARGEFILE_MARK_REMOVED are dropped
    from working-copy tracking in the main dirstate while being kept
    tracked (as newly added) in the largefiles dirstate, before the
    wrapped ``recordupdates`` handles the remaining actions.
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.parentchange():
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)
676
676
677
677
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge a largefile standin by hash comparison and user prompt.

    Falls through to the wrapped filemerge for non-standins and for
    absent sides.  Otherwise compares the largefile hashes stored in the
    standins and only prompts when both sides changed in different ways.
    Returns ``(0, False)`` (merge succeeded, not deleted).
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    # Standins contain the largefile's hash, so comparing the (case-
    # normalized) hashes is enough to compare largefile contents.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    # Take the other side when: it changed relative to the ancestor, it
    # differs from the local side, and either the local side is
    # unchanged or the user explicitly picked "(o)ther".
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
710
710
711
711
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Map standin paths back to largefile paths in copy records.

    Both the destination (key) and source (value) of every copy
    reported by the wrapped ``pathcopies`` are translated from standin
    form to the corresponding largefile name, when applicable.
    """
    return {
        (lfutil.splitstandin(dst) or dst): (lfutil.splitstandin(src) or src)
        for dst, src in pycompat.iteritems(orig(ctx1, ctx2, match=match))
    }
721
721
722
722
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename normal files and largefiles in two separate passes.

    The first pass runs the wrapped copy restricted to normal files;
    the second pass runs it again restricted to standins, then copies
    or renames the matching largefiles and updates the lfdirstate.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Restrict scmutil.match to files that are NOT largefiles.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            # "no files to copy" here only means no NORMAL files matched;
            # largefiles may still match in the second pass below.
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute working-dir path of the standin for a relative path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Restrict scmutil.match to standins of tracked largefiles.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                # Match only standins of manifest-tracked largefiles that
                # the original matcher accepted.
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Refuse to clobber an existing largefile unless --force,
            # and remember every copied (src, dest) pair for the
            # largefile copy/rename pass below.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                # Derive the largefile paths from the copied standin paths.
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    # Only abort when BOTH passes found nothing to copy.
    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
889
889
890
890
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Revert largefiles by reverting their standins, then syncing.

    Brings the standins up to date with the working copy, runs the
    wrapped revert against the standins, and finally updates the
    largefiles whose standins changed.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        # Sync standins with the working copy so revert sees real diffs:
        # refresh standins of modified largefiles, drop those of deleted
        # ones.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        # Snapshot standin hashes to detect what revert changed.
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Redirect the matcher from largefiles to their standins.
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
976
976
977
977
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Run pull, then cache largefiles for the revisions selected by
    --lfrev (and/or all pulled revisions with --all-largefiles)."""
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        # --all-largefiles is sugar for --lfrev "pulled()"
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # pulled() is only meaningful while this pull is in progress
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1020
1020
1021
1021
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    # Resolve the --lfrev revsets now and stash them in opargs so the
    # wrapped pushoperation constructor can pick them up.
    revspecs = kwargs.pop('lfrev', None)
    if revspecs:
        kwargs.setdefault('opargs', {})[b'lfrevs'] = logcmdutil.revrange(
            repo, revspecs
        )
    return orig(ui, repo, *args, **kwargs)
1041
1041
1042
1042
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    # 'lfrevs' is an extension-private kwarg (planted by overridepush);
    # strip it before delegating, then attach it to the operation.
    revs = kwargs.pop('lfrevs', None)
    operation = orig(*args, **kwargs)
    operation.lfrevs = revs
    return operation
1050
1050
1051
1051
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only planted by overridepull for the duration
    # of a --lfrev pull; outside that window the predicate is an error.
    firstpulled = getattr(repo, 'firstpulled', None)
    if firstpulled is None:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    pulled = [rev for rev in subset if rev >= firstpulled]
    return smartset.baseset(pulled)
1077
1077
1078
1078
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Add --all-largefiles to clone, rejecting non-local destinations.

    Pre-downloading every largefile version only makes sense when the
    destination repository lives on the local filesystem.
    """
    target = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(target):
        msg = _(b'--all-largefiles is incompatible with non-local destination %s')
        raise error.Abort(msg % target)

    return orig(ui, source, dest, **opts)
1101
1101
1102
1102
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone() to optionally download all largefiles afterwards.

    Returns None (signalling failure) when --all-largefiles was given and
    some largefiles could not be downloaded into the new repository.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer.  The largefiles then cannot be downloaded and the hgrc
    # cannot be updated, so pass the result through untouched.
    if not repo:
        return result

    # Caching is implicitly limited to the 'rev' option, since the dest repo
    # was truncated at that point.  The user may expect a download count with
    # this option, so attempt it whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)
        if missing != 0:
            return None

    return result
1127
1127
1128
1128
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Wrap the rebase command for largefiles-enabled repositories.

    Installs an automated commit hook and a no-op status writer around the
    wrapped rebase, and disables in-memory rebase via a config override.
    """
    # Repositories without largefiles need no special handling.
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Silence largefiles status output during the automated commits.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        # Pop in reverse order of the appends above to restore state.
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1145
1145
1146
1146
@eh.extsetup
def overriderebase(ui):
    """At extension setup, force the rebase extension onto the on-disk
    code path by wrapping its _dorebase entry point."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not loaded; nothing to wrap
        return

    def _forceondisk(orig, *args, **kwargs):
        # Largefiles requires a working directory, so disable in-memory mode.
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _forceondisk)
1160
1160
1161
1161
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefiles status enabled."""
    # Use the unfiltered repo both for the lfstatus flag and for the call,
    # so the flag and the archive operation see the same repo object.
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)
1166
1166
1167
1167
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """Serve hgweb archive requests with largefiles status enabled."""
    with lfstatus(web.repo):
        return orig(web)
1172
1172
1173
1173
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive a revision, replacing largefile standins with their content.

    Standin files are detected with lfutil.splitstandin and the real file
    data is read from the local store or system cache via lfutil.findfile.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip entries excluded by the matcher; decode through the repo's
        # filters when requested, then hand the bytes to the archiver.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # 'f' is a standin: resolve the real largefile path.  For a
            # committed node the standin's content is the hash used to look
            # the file up in the store/cache.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # Late-binding of 'path' in the lambda is safe: write() is
            # called in this same loop iteration, before 'path' changes.
            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1269
1269
1270
1270
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo, replacing largefile standins with content.

    Mirrors overridearchive for the subrepository case; falls back to the
    original implementation when largefiles is disabled or lfstatus is off.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this tests match(f) — the enclosing loop variable —
        # rather than the 'name' parameter.  At the sole call site below
        # write(f, ...) is invoked with name == f, so behaviour matches, but
        # overridearchive's equivalent helper uses match(name).
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # 'f' is a standin: resolve the real largefile path, or abort if
            # it is in neither the repo store nor the system cache.
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # Late-binding of 'path' is safe: write() runs in this iteration.
            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1333
1333
1334
1334
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    # Let the wrapped check examine the normal files first ...
    orig(repo, *args, **kwargs)
    # ... then recompute status with largefiles taken into account.
    with lfstatus(repo):
        changes = repo.status()
    dirty = (
        changes.modified or changes.added or changes.removed or changes.deleted
    )
    if dirty:
        raise error.Abort(_(b'uncommitted changes'))
1346
1346
1347
1347
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute post-commit status with largefiles taken into account."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1352
1352
1353
1353
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Forget normal files via the wrapped implementation, then forget the
    matched largefiles by untracking them in the largefiles dirstate and
    removing their standins.

    Returns the combined (bad, forgot) lists from both passes.
    """
    # First pass: let the original forget handle the non-largefiles.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    # Second pass: restrict the matcher to largefiles only.
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only files whose standin is in the manifest are actually tracked.
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1404
1404
1405
1405
def _getoutgoings(repo, other, missing, addfunc):
    """Report (filename, largefile hash) pairs referenced by the outgoing
    revisions in 'missing'.

    Pairs whose largefile already exists on the 'other' repository are
    skipped; each remaining unique pair is passed to 'addfunc' once.
    """
    seen_pairs = set()
    hashes = set()

    def record(fn, lfhash):
        # Collect each (name, hash) combination only once.
        pair = (fn, lfhash)
        if pair not in seen_pairs:
            seen_pairs.add(pair)
            hashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, record)
    if not hashes:
        return
    remote_has = storefactory.openstore(repo, other).exists(hashes)
    for fn, lfhash in seen_pairs:
        if not remote_has[lfhash]:
            # hash is absent on "other", so the caller must upload it
            addfunc(fn, lfhash)
1430
1430
1431
1431
def outgoinghook(ui, repo, other, opts, missing):
    """'hg outgoing' hook: when --large was given, list the largefiles that
    would be uploaded to 'other'; with --debug also list each hash per file.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            # Debug mode: remember every hash per filename for display.
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            # Normal mode: only filenames are shown.
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1470
1470
1471
1471
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Add the --large option to 'hg outgoing'."""
    # Nothing to do here other than add the extra help option- the hook above
    # processes it.
    return orig(*args, **kwargs)
1479
1479
1480
1480
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary' remote hook reporting outgoing largefiles.

    When 'changes' is None this is the capability query and returns a pair
    of booleans (incoming check needed, outgoing check needed).  Otherwise,
    when --large was given, print how many largefile entities are outgoing.
    """
    wantlarge = opts.get(b'large', False)
    if changes is None:
        # Capability query: never need the incoming check; the outgoing
        # check is needed only when --large was requested.
        return (False, bool(wantlarge))
    if not wantlarge:
        return

    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_(b'largefiles: (no remote repo)\n'))
        return

    filenames = set()
    hashes = set()

    def collect(fn, lfhash):
        filenames.add(fn)
        hashes.add(lfhash)

    _getoutgoings(repo, peer, outgoing.missing, collect)

    if not filenames:
        # i18n: column positioning for "hg summary"
        ui.status(_(b'largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(
            _(b'largefiles: %d entities for %d files to upload\n')
            % (len(hashes), len(filenames))
        )
1513
1513
1514
1514
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefiles status enabled."""
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1521
1521
1522
1522
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """addremove wrapper: route largefiles through the largefiles add and
    remove helpers, then hand the remaining normal files to the original
    addremove with a matcher that excludes largefiles.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1572
1572
1573
1573
1574 # Calling purge with --all will cause the largefiles to be deleted.
1574 # Calling purge with --all will cause the largefiles to be deleted.
1575 # Override repo.status to prevent this from happening.
1575 # Override repo.status to prevent this from happening.
1576 @eh.wrapcommand(b'purge')
1576 @eh.wrapcommand(b'purge')
1577 def overridepurge(orig, ui, repo, *dirs, **opts):
1577 def overridepurge(orig, ui, repo, *dirs, **opts):
1578 # XXX Monkey patching a repoview will not work. The assigned attribute will
1578 # XXX Monkey patching a repoview will not work. The assigned attribute will
1579 # be set on the unfiltered repo, but we will only lookup attributes in the
1579 # be set on the unfiltered repo, but we will only lookup attributes in the
1580 # unfiltered repo if the lookup in the repoview object itself fails. As the
1580 # unfiltered repo if the lookup in the repoview object itself fails. As the
1581 # monkey patched method exists on the repoview class the lookup will not
1581 # monkey patched method exists on the repoview class the lookup will not
1582 # fail. As a result, the original version will shadow the monkey patched
1582 # fail. As a result, the original version will shadow the monkey patched
1583 # one, defeating the monkey patch.
1583 # one, defeating the monkey patch.
1584 #
1584 #
1585 # As a work around we use an unfiltered repo here. We should do something
1585 # As a work around we use an unfiltered repo here. We should do something
1586 # cleaner instead.
1586 # cleaner instead.
1587 repo = repo.unfiltered()
1587 repo = repo.unfiltered()
1588 oldstatus = repo.status
1588 oldstatus = repo.status
1589
1589
1590 def overridestatus(
1590 def overridestatus(
1591 node1=b'.',
1591 node1=b'.',
1592 node2=None,
1592 node2=None,
1593 match=None,
1593 match=None,
1594 ignored=False,
1594 ignored=False,
1595 clean=False,
1595 clean=False,
1596 unknown=False,
1596 unknown=False,
1597 listsubrepos=False,
1597 listsubrepos=False,
1598 ):
1598 ):
1599 r = oldstatus(
1599 r = oldstatus(
1600 node1, node2, match, ignored, clean, unknown, listsubrepos
1600 node1, node2, match, ignored, clean, unknown, listsubrepos
1601 )
1601 )
1602 lfdirstate = lfutil.openlfdirstate(ui, repo)
1602 lfdirstate = lfutil.openlfdirstate(ui, repo)
1603 unknown = [
1603 unknown = [
1604 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1604 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1605 ]
1605 ]
1606 ignored = [
1606 ignored = [
1607 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1607 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1608 ]
1608 ]
1609 return scmutil.status(
1609 return scmutil.status(
1610 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1610 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1611 )
1611 )
1612
1612
1613 repo.status = overridestatus
1613 repo.status = overridestatus
1614 orig(ui, repo, *dirs, **opts)
1614 orig(ui, repo, *dirs, **opts)
1615 repo.status = oldstatus
1615 repo.status = oldstatus
1616
1616
1617
1617
1618 @eh.wrapcommand(b'rollback')
1618 @eh.wrapcommand(b'rollback')
1619 def overriderollback(orig, ui, repo, **opts):
1619 def overriderollback(orig, ui, repo, **opts):
1620 with repo.wlock():
1620 with repo.wlock():
1621 before = repo.dirstate.parents()
1621 before = repo.dirstate.parents()
1622 orphans = {
1622 orphans = {
1623 f
1623 f
1624 for f in repo.dirstate
1624 for f in repo.dirstate
1625 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1625 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1626 }
1626 }
1627 result = orig(ui, repo, **opts)
1627 result = orig(ui, repo, **opts)
1628 after = repo.dirstate.parents()
1628 after = repo.dirstate.parents()
1629 if before == after:
1629 if before == after:
1630 return result # no need to restore standins
1630 return result # no need to restore standins
1631
1631
1632 pctx = repo[b'.']
1632 pctx = repo[b'.']
1633 for f in repo.dirstate:
1633 for f in repo.dirstate:
1634 if lfutil.isstandin(f):
1634 if lfutil.isstandin(f):
1635 orphans.discard(f)
1635 orphans.discard(f)
1636 if repo.dirstate.get_entry(f).removed:
1636 if repo.dirstate.get_entry(f).removed:
1637 repo.wvfs.unlinkpath(f, ignoremissing=True)
1637 repo.wvfs.unlinkpath(f, ignoremissing=True)
1638 elif f in pctx:
1638 elif f in pctx:
1639 fctx = pctx[f]
1639 fctx = pctx[f]
1640 repo.wwrite(f, fctx.data(), fctx.flags())
1640 repo.wwrite(f, fctx.data(), fctx.flags())
1641 else:
1641 else:
1642 # content of standin is not so important in 'a',
1642 # content of standin is not so important in 'a',
1643 # 'm' or 'n' (coming from the 2nd parent) cases
1643 # 'm' or 'n' (coming from the 2nd parent) cases
1644 lfutil.writestandin(repo, f, b'', False)
1644 lfutil.writestandin(repo, f, b'', False)
1645 for standin in orphans:
1645 for standin in orphans:
1646 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1646 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1647
1647
1648 return result
1648 return result
1649
1649
1650
1650
1651 @eh.wrapcommand(b'transplant', extension=b'transplant')
1651 @eh.wrapcommand(b'transplant', extension=b'transplant')
1652 def overridetransplant(orig, ui, repo, *revs, **opts):
1652 def overridetransplant(orig, ui, repo, *revs, **opts):
1653 resuming = opts.get('continue')
1653 resuming = opts.get('continue')
1654 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1654 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1655 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1655 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1656 try:
1656 try:
1657 result = orig(ui, repo, *revs, **opts)
1657 result = orig(ui, repo, *revs, **opts)
1658 finally:
1658 finally:
1659 repo._lfstatuswriters.pop()
1659 repo._lfstatuswriters.pop()
1660 repo._lfcommithooks.pop()
1660 repo._lfcommithooks.pop()
1661 return result
1661 return result
1662
1662
1663
1663
1664 @eh.wrapcommand(b'cat')
1664 @eh.wrapcommand(b'cat')
1665 def overridecat(orig, ui, repo, file1, *pats, **opts):
1665 def overridecat(orig, ui, repo, file1, *pats, **opts):
1666 opts = pycompat.byteskwargs(opts)
1666 opts = pycompat.byteskwargs(opts)
1667 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1667 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1668 err = 1
1668 err = 1
1669 notbad = set()
1669 notbad = set()
1670 m = scmutil.match(ctx, (file1,) + pats, opts)
1670 m = scmutil.match(ctx, (file1,) + pats, opts)
1671 origmatchfn = m.matchfn
1671 origmatchfn = m.matchfn
1672
1672
1673 def lfmatchfn(f):
1673 def lfmatchfn(f):
1674 if origmatchfn(f):
1674 if origmatchfn(f):
1675 return True
1675 return True
1676 lf = lfutil.splitstandin(f)
1676 lf = lfutil.splitstandin(f)
1677 if lf is None:
1677 if lf is None:
1678 return False
1678 return False
1679 notbad.add(lf)
1679 notbad.add(lf)
1680 return origmatchfn(lf)
1680 return origmatchfn(lf)
1681
1681
1682 m.matchfn = lfmatchfn
1682 m.matchfn = lfmatchfn
1683 origbadfn = m.bad
1683 origbadfn = m.bad
1684
1684
1685 def lfbadfn(f, msg):
1685 def lfbadfn(f, msg):
1686 if not f in notbad:
1686 if not f in notbad:
1687 origbadfn(f, msg)
1687 origbadfn(f, msg)
1688
1688
1689 m.bad = lfbadfn
1689 m.bad = lfbadfn
1690
1690
1691 origvisitdirfn = m.visitdir
1691 origvisitdirfn = m.visitdir
1692
1692
1693 def lfvisitdirfn(dir):
1693 def lfvisitdirfn(dir):
1694 if dir == lfutil.shortname:
1694 if dir == lfutil.shortname:
1695 return True
1695 return True
1696 ret = origvisitdirfn(dir)
1696 ret = origvisitdirfn(dir)
1697 if ret:
1697 if ret:
1698 return ret
1698 return ret
1699 lf = lfutil.splitstandin(dir)
1699 lf = lfutil.splitstandin(dir)
1700 if lf is None:
1700 if lf is None:
1701 return False
1701 return False
1702 return origvisitdirfn(lf)
1702 return origvisitdirfn(lf)
1703
1703
1704 m.visitdir = lfvisitdirfn
1704 m.visitdir = lfvisitdirfn
1705
1705
1706 for f in ctx.walk(m):
1706 for f in ctx.walk(m):
1707 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1707 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1708 lf = lfutil.splitstandin(f)
1708 lf = lfutil.splitstandin(f)
1709 if lf is None or origmatchfn(f):
1709 if lf is None or origmatchfn(f):
1710 # duplicating unreachable code from commands.cat
1710 # duplicating unreachable code from commands.cat
1711 data = ctx[f].data()
1711 data = ctx[f].data()
1712 if opts.get(b'decode'):
1712 if opts.get(b'decode'):
1713 data = repo.wwritedata(f, data)
1713 data = repo.wwritedata(f, data)
1714 fp.write(data)
1714 fp.write(data)
1715 else:
1715 else:
1716 hash = lfutil.readasstandin(ctx[f])
1716 hash = lfutil.readasstandin(ctx[f])
1717 if not lfutil.inusercache(repo.ui, hash):
1717 if not lfutil.inusercache(repo.ui, hash):
1718 store = storefactory.openstore(repo)
1718 store = storefactory.openstore(repo)
1719 success, missing = store.get([(lf, hash)])
1719 success, missing = store.get([(lf, hash)])
1720 if len(success) != 1:
1720 if len(success) != 1:
1721 raise error.Abort(
1721 raise error.Abort(
1722 _(
1722 _(
1723 b'largefile %s is not in cache and could not be '
1723 b'largefile %s is not in cache and could not be '
1724 b'downloaded'
1724 b'downloaded'
1725 )
1725 )
1726 % lf
1726 % lf
1727 )
1727 )
1728 path = lfutil.usercachepath(repo.ui, hash)
1728 path = lfutil.usercachepath(repo.ui, hash)
1729 with open(path, b"rb") as fpin:
1729 with open(path, b"rb") as fpin:
1730 for chunk in util.filechunkiter(fpin):
1730 for chunk in util.filechunkiter(fpin):
1731 fp.write(chunk)
1731 fp.write(chunk)
1732 err = 0
1732 err = 0
1733 return err
1733 return err
1734
1734
1735
1735
1736 @eh.wrapfunction(merge, b'_update')
1736 @eh.wrapfunction(merge, b'_update')
1737 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1737 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1738 matcher = kwargs.get('matcher', None)
1738 matcher = kwargs.get('matcher', None)
1739 # note if this is a partial update
1739 # note if this is a partial update
1740 partial = matcher and not matcher.always()
1740 partial = matcher and not matcher.always()
1741 with repo.wlock():
1741 with repo.wlock():
1742 # branch | | |
1742 # branch | | |
1743 # merge | force | partial | action
1743 # merge | force | partial | action
1744 # -------+-------+---------+--------------
1744 # -------+-------+---------+--------------
1745 # x | x | x | linear-merge
1745 # x | x | x | linear-merge
1746 # o | x | x | branch-merge
1746 # o | x | x | branch-merge
1747 # x | o | x | overwrite (as clean update)
1747 # x | o | x | overwrite (as clean update)
1748 # o | o | x | force-branch-merge (*1)
1748 # o | o | x | force-branch-merge (*1)
1749 # x | x | o | (*)
1749 # x | x | o | (*)
1750 # o | x | o | (*)
1750 # o | x | o | (*)
1751 # x | o | o | overwrite (as revert)
1751 # x | o | o | overwrite (as revert)
1752 # o | o | o | (*)
1752 # o | o | o | (*)
1753 #
1753 #
1754 # (*) don't care
1754 # (*) don't care
1755 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1755 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1756
1756
1757 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1757 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1758 unsure, s, mtime_boundary = lfdirstate.status(
1758 unsure, s, mtime_boundary = lfdirstate.status(
1759 matchmod.always(),
1759 matchmod.always(),
1760 subrepos=[],
1760 subrepos=[],
1761 ignored=False,
1761 ignored=False,
1762 clean=True,
1762 clean=True,
1763 unknown=False,
1763 unknown=False,
1764 )
1764 )
1765 oldclean = set(s.clean)
1765 oldclean = set(s.clean)
1766 pctx = repo[b'.']
1766 pctx = repo[b'.']
1767 dctx = repo[node]
1767 dctx = repo[node]
1768 for lfile in unsure + s.modified:
1768 for lfile in unsure + s.modified:
1769 lfileabs = repo.wvfs.join(lfile)
1769 lfileabs = repo.wvfs.join(lfile)
1770 if not repo.wvfs.exists(lfileabs):
1770 if not repo.wvfs.exists(lfileabs):
1771 continue
1771 continue
1772 lfhash = lfutil.hashfile(lfileabs)
1772 lfhash = lfutil.hashfile(lfileabs)
1773 standin = lfutil.standin(lfile)
1773 standin = lfutil.standin(lfile)
1774 lfutil.writestandin(
1774 lfutil.writestandin(
1775 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1775 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1776 )
1776 )
1777 if standin in pctx and lfhash == lfutil.readasstandin(
1777 if standin in pctx and lfhash == lfutil.readasstandin(
1778 pctx[standin]
1778 pctx[standin]
1779 ):
1779 ):
1780 oldclean.add(lfile)
1780 oldclean.add(lfile)
1781 for lfile in s.added:
1781 for lfile in s.added:
1782 fstandin = lfutil.standin(lfile)
1782 fstandin = lfutil.standin(lfile)
1783 if fstandin not in dctx:
1783 if fstandin not in dctx:
1784 # in this case, content of standin file is meaningless
1784 # in this case, content of standin file is meaningless
1785 # (in dctx, lfile is unknown, or normal file)
1785 # (in dctx, lfile is unknown, or normal file)
1786 continue
1786 continue
1787 lfutil.updatestandin(repo, lfile, fstandin)
1787 lfutil.updatestandin(repo, lfile, fstandin)
1788 # mark all clean largefiles as dirty, just in case the update gets
1788 # mark all clean largefiles as dirty, just in case the update gets
1789 # interrupted before largefiles and lfdirstate are synchronized
1789 # interrupted before largefiles and lfdirstate are synchronized
1790 for lfile in oldclean:
1790 for lfile in oldclean:
1791 lfdirstate.set_possibly_dirty(lfile)
1791 lfdirstate.set_possibly_dirty(lfile)
1792 lfdirstate.write(repo.currenttransaction())
1792 lfdirstate.write(repo.currenttransaction())
1793
1793
1794 oldstandins = lfutil.getstandinsstate(repo)
1794 oldstandins = lfutil.getstandinsstate(repo)
1795 wc = kwargs.get('wc')
1795 wc = kwargs.get('wc')
1796 if wc and wc.isinmemory():
1796 if wc and wc.isinmemory():
1797 # largefiles is not a good candidate for in-memory merge (large
1797 # largefiles is not a good candidate for in-memory merge (large
1798 # files, custom dirstate, matcher usage).
1798 # files, custom dirstate, matcher usage).
1799 raise error.ProgrammingError(
1799 raise error.ProgrammingError(
1800 b'largefiles is not compatible with in-memory merge'
1800 b'largefiles is not compatible with in-memory merge'
1801 )
1801 )
1802 with lfdirstate.parentchange():
1802 with lfdirstate.parentchange():
1803 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1803 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1804
1804
1805 newstandins = lfutil.getstandinsstate(repo)
1805 newstandins = lfutil.getstandinsstate(repo)
1806 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1806 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1807
1807
1808 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1808 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1809 # all the ones that didn't change as clean
1809 # all the ones that didn't change as clean
1810 for lfile in oldclean.difference(filelist):
1810 for lfile in oldclean.difference(filelist):
1811 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1811 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1812 lfdirstate.write(repo.currenttransaction())
1812 lfdirstate.write(repo.currenttransaction())
1813
1813
1814 if branchmerge or force or partial:
1814 if branchmerge or force or partial:
1815 filelist.extend(s.deleted + s.removed)
1815 filelist.extend(s.deleted + s.removed)
1816
1816
1817 lfcommands.updatelfiles(
1817 lfcommands.updatelfiles(
1818 repo.ui, repo, filelist=filelist, normallookup=partial
1818 repo.ui, repo, filelist=filelist, normallookup=partial
1819 )
1819 )
1820
1820
1821 return result
1821 return result
1822
1822
1823
1823
1824 @eh.wrapfunction(scmutil, b'marktouched')
1824 @eh.wrapfunction(scmutil, b'marktouched')
1825 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1825 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1826 result = orig(repo, files, *args, **kwargs)
1826 result = orig(repo, files, *args, **kwargs)
1827
1827
1828 filelist = []
1828 filelist = []
1829 for f in files:
1829 for f in files:
1830 lf = lfutil.splitstandin(f)
1830 lf = lfutil.splitstandin(f)
1831 if lf is not None:
1831 if lf is not None:
1832 filelist.append(lf)
1832 filelist.append(lf)
1833 if filelist:
1833 if filelist:
1834 lfcommands.updatelfiles(
1834 lfcommands.updatelfiles(
1835 repo.ui,
1835 repo.ui,
1836 repo,
1836 repo,
1837 filelist=filelist,
1837 filelist=filelist,
1838 printmessage=False,
1838 printmessage=False,
1839 normallookup=True,
1839 normallookup=True,
1840 )
1840 )
1841
1841
1842 return result
1842 return result
1843
1843
1844
1844
1845 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1845 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1846 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1846 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1847 def upgraderequirements(orig, repo):
1847 def upgraderequirements(orig, repo):
1848 reqs = orig(repo)
1848 reqs = orig(repo)
1849 if b'largefiles' in repo.requirements:
1849 if b'largefiles' in repo.requirements:
1850 reqs.add(b'largefiles')
1850 reqs.add(b'largefiles')
1851 return reqs
1851 return reqs
1852
1852
1853
1853
1854 _lfscheme = b'largefile://'
1854 _lfscheme = b'largefile://'
1855
1855
1856
1856
1857 @eh.wrapfunction(urlmod, b'open')
1857 @eh.wrapfunction(urlmod, b'open')
1858 def openlargefile(orig, ui, url_, data=None, **kwargs):
1858 def openlargefile(orig, ui, url_, data=None, **kwargs):
1859 if url_.startswith(_lfscheme):
1859 if url_.startswith(_lfscheme):
1860 if data:
1860 if data:
1861 msg = b"cannot use data on a 'largefile://' url"
1861 msg = b"cannot use data on a 'largefile://' url"
1862 raise error.ProgrammingError(msg)
1862 raise error.ProgrammingError(msg)
1863 lfid = url_[len(_lfscheme) :]
1863 lfid = url_[len(_lfscheme) :]
1864 return storefactory.getlfile(ui, lfid)
1864 return storefactory.getlfile(ui, lfid)
1865 else:
1865 else:
1866 return orig(ui, url_, data=data, **kwargs)
1866 return orig(ui, url_, data=data, **kwargs)
@@ -1,2490 +1,2493 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import struct
12 import struct
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullrev
15 from .node import nullrev
16 from .thirdparty import attr
16 from .thirdparty import attr
17 from .utils import stringutil
17 from .utils import stringutil
18 from .dirstateutils import timestamp
18 from .dirstateutils import timestamp
19 from . import (
19 from . import (
20 copies,
20 copies,
21 encoding,
21 encoding,
22 error,
22 error,
23 filemerge,
23 filemerge,
24 match as matchmod,
24 match as matchmod,
25 mergestate as mergestatemod,
25 mergestate as mergestatemod,
26 obsutil,
26 obsutil,
27 pathutil,
27 pathutil,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 subrepoutil,
30 subrepoutil,
31 util,
31 util,
32 worker,
32 worker,
33 )
33 )
34
34
35 _pack = struct.pack
35 _pack = struct.pack
36 _unpack = struct.unpack
36 _unpack = struct.unpack
37
37
38
38
39 def _getcheckunknownconfig(repo, section, name):
39 def _getcheckunknownconfig(repo, section, name):
40 config = repo.ui.config(section, name)
40 config = repo.ui.config(section, name)
41 valid = [b'abort', b'ignore', b'warn']
41 valid = [b'abort', b'ignore', b'warn']
42 if config not in valid:
42 if config not in valid:
43 validstr = b', '.join([b"'" + v + b"'" for v in valid])
43 validstr = b', '.join([b"'" + v + b"'" for v in valid])
44 msg = _(b"%s.%s not valid ('%s' is none of %s)")
44 msg = _(b"%s.%s not valid ('%s' is none of %s)")
45 msg %= (section, name, config, validstr)
45 msg %= (section, name, config, validstr)
46 raise error.ConfigError(msg)
46 raise error.ConfigError(msg)
47 return config
47 return config
48
48
49
49
50 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
50 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
51 if wctx.isinmemory():
51 if wctx.isinmemory():
52 # Nothing to do in IMM because nothing in the "working copy" can be an
52 # Nothing to do in IMM because nothing in the "working copy" can be an
53 # unknown file.
53 # unknown file.
54 #
54 #
55 # Note that we should bail out here, not in ``_checkunknownfiles()``,
55 # Note that we should bail out here, not in ``_checkunknownfiles()``,
56 # because that function does other useful work.
56 # because that function does other useful work.
57 return False
57 return False
58
58
59 if f2 is None:
59 if f2 is None:
60 f2 = f
60 f2 = f
61 return (
61 return (
62 repo.wvfs.audit.check(f)
62 repo.wvfs.audit.check(f)
63 and repo.wvfs.isfileorlink(f)
63 and repo.wvfs.isfileorlink(f)
64 and repo.dirstate.normalize(f) not in repo.dirstate
64 and repo.dirstate.normalize(f) not in repo.dirstate
65 and mctx[f2].cmp(wctx[f])
65 and mctx[f2].cmp(wctx[f])
66 )
66 )
67
67
68
68
69 class _unknowndirschecker(object):
69 class _unknowndirschecker(object):
70 """
70 """
71 Look for any unknown files or directories that may have a path conflict
71 Look for any unknown files or directories that may have a path conflict
72 with a file. If any path prefix of the file exists as a file or link,
72 with a file. If any path prefix of the file exists as a file or link,
73 then it conflicts. If the file itself is a directory that contains any
73 then it conflicts. If the file itself is a directory that contains any
74 file that is not tracked, then it conflicts.
74 file that is not tracked, then it conflicts.
75
75
76 Returns the shortest path at which a conflict occurs, or None if there is
76 Returns the shortest path at which a conflict occurs, or None if there is
77 no conflict.
77 no conflict.
78 """
78 """
79
79
80 def __init__(self):
80 def __init__(self):
81 # A set of paths known to be good. This prevents repeated checking of
81 # A set of paths known to be good. This prevents repeated checking of
82 # dirs. It will be updated with any new dirs that are checked and found
82 # dirs. It will be updated with any new dirs that are checked and found
83 # to be safe.
83 # to be safe.
84 self._unknowndircache = set()
84 self._unknowndircache = set()
85
85
86 # A set of paths that are known to be absent. This prevents repeated
86 # A set of paths that are known to be absent. This prevents repeated
87 # checking of subdirectories that are known not to exist. It will be
87 # checking of subdirectories that are known not to exist. It will be
88 # updated with any new dirs that are checked and found to be absent.
88 # updated with any new dirs that are checked and found to be absent.
89 self._missingdircache = set()
89 self._missingdircache = set()
90
90
91 def __call__(self, repo, wctx, f):
91 def __call__(self, repo, wctx, f):
92 if wctx.isinmemory():
92 if wctx.isinmemory():
93 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
93 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
94 return False
94 return False
95
95
96 # Check for path prefixes that exist as unknown files.
96 # Check for path prefixes that exist as unknown files.
97 for p in reversed(list(pathutil.finddirs(f))):
97 for p in reversed(list(pathutil.finddirs(f))):
98 if p in self._missingdircache:
98 if p in self._missingdircache:
99 return
99 return
100 if p in self._unknowndircache:
100 if p in self._unknowndircache:
101 continue
101 continue
102 if repo.wvfs.audit.check(p):
102 if repo.wvfs.audit.check(p):
103 if (
103 if (
104 repo.wvfs.isfileorlink(p)
104 repo.wvfs.isfileorlink(p)
105 and repo.dirstate.normalize(p) not in repo.dirstate
105 and repo.dirstate.normalize(p) not in repo.dirstate
106 ):
106 ):
107 return p
107 return p
108 if not repo.wvfs.lexists(p):
108 if not repo.wvfs.lexists(p):
109 self._missingdircache.add(p)
109 self._missingdircache.add(p)
110 return
110 return
111 self._unknowndircache.add(p)
111 self._unknowndircache.add(p)
112
112
113 # Check if the file conflicts with a directory containing unknown files.
113 # Check if the file conflicts with a directory containing unknown files.
114 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
114 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
115 # Does the directory contain any files that are not in the dirstate?
115 # Does the directory contain any files that are not in the dirstate?
116 for p, dirs, files in repo.wvfs.walk(f):
116 for p, dirs, files in repo.wvfs.walk(f):
117 for fn in files:
117 for fn in files:
118 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
118 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
119 relf = repo.dirstate.normalize(relf, isknown=True)
119 relf = repo.dirstate.normalize(relf, isknown=True)
120 if relf not in repo.dirstate:
120 if relf not in repo.dirstate:
121 return f
121 return f
122 return None
122 return None
123
123
124
124
125 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
125 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
126 """
126 """
127 Considers any actions that care about the presence of conflicting unknown
127 Considers any actions that care about the presence of conflicting unknown
128 files. For some actions, the result is to abort; for others, it is to
128 files. For some actions, the result is to abort; for others, it is to
129 choose a different action.
129 choose a different action.
130 """
130 """
131 fileconflicts = set()
131 fileconflicts = set()
132 pathconflicts = set()
132 pathconflicts = set()
133 warnconflicts = set()
133 warnconflicts = set()
134 abortconflicts = set()
134 abortconflicts = set()
135 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
135 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
136 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
136 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
137 pathconfig = repo.ui.configbool(
137 pathconfig = repo.ui.configbool(
138 b'experimental', b'merge.checkpathconflicts'
138 b'experimental', b'merge.checkpathconflicts'
139 )
139 )
140 if not force:
140 if not force:
141
141
142 def collectconflicts(conflicts, config):
142 def collectconflicts(conflicts, config):
143 if config == b'abort':
143 if config == b'abort':
144 abortconflicts.update(conflicts)
144 abortconflicts.update(conflicts)
145 elif config == b'warn':
145 elif config == b'warn':
146 warnconflicts.update(conflicts)
146 warnconflicts.update(conflicts)
147
147
148 checkunknowndirs = _unknowndirschecker()
148 checkunknowndirs = _unknowndirschecker()
149 for f in mresult.files(
149 for f in mresult.files(
150 (
150 (
151 mergestatemod.ACTION_CREATED,
151 mergestatemod.ACTION_CREATED,
152 mergestatemod.ACTION_DELETED_CHANGED,
152 mergestatemod.ACTION_DELETED_CHANGED,
153 )
153 )
154 ):
154 ):
155 if _checkunknownfile(repo, wctx, mctx, f):
155 if _checkunknownfile(repo, wctx, mctx, f):
156 fileconflicts.add(f)
156 fileconflicts.add(f)
157 elif pathconfig and f not in wctx:
157 elif pathconfig and f not in wctx:
158 path = checkunknowndirs(repo, wctx, f)
158 path = checkunknowndirs(repo, wctx, f)
159 if path is not None:
159 if path is not None:
160 pathconflicts.add(path)
160 pathconflicts.add(path)
161 for f, args, msg in mresult.getactions(
161 for f, args, msg in mresult.getactions(
162 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
162 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
163 ):
163 ):
164 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
164 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
165 fileconflicts.add(f)
165 fileconflicts.add(f)
166
166
167 allconflicts = fileconflicts | pathconflicts
167 allconflicts = fileconflicts | pathconflicts
168 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
168 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
169 unknownconflicts = allconflicts - ignoredconflicts
169 unknownconflicts = allconflicts - ignoredconflicts
170 collectconflicts(ignoredconflicts, ignoredconfig)
170 collectconflicts(ignoredconflicts, ignoredconfig)
171 collectconflicts(unknownconflicts, unknownconfig)
171 collectconflicts(unknownconflicts, unknownconfig)
172 else:
172 else:
173 for f, args, msg in list(
173 for f, args, msg in list(
174 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
174 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
175 ):
175 ):
176 fl2, anc = args
176 fl2, anc = args
177 different = _checkunknownfile(repo, wctx, mctx, f)
177 different = _checkunknownfile(repo, wctx, mctx, f)
178 if repo.dirstate._ignore(f):
178 if repo.dirstate._ignore(f):
179 config = ignoredconfig
179 config = ignoredconfig
180 else:
180 else:
181 config = unknownconfig
181 config = unknownconfig
182
182
183 # The behavior when force is True is described by this table:
183 # The behavior when force is True is described by this table:
184 # config different mergeforce | action backup
184 # config different mergeforce | action backup
185 # * n * | get n
185 # * n * | get n
186 # * y y | merge -
186 # * y y | merge -
187 # abort y n | merge - (1)
187 # abort y n | merge - (1)
188 # warn y n | warn + get y
188 # warn y n | warn + get y
189 # ignore y n | get y
189 # ignore y n | get y
190 #
190 #
191 # (1) this is probably the wrong behavior here -- we should
191 # (1) this is probably the wrong behavior here -- we should
192 # probably abort, but some actions like rebases currently
192 # probably abort, but some actions like rebases currently
193 # don't like an abort happening in the middle of
193 # don't like an abort happening in the middle of
194 # merge.update.
194 # merge.update.
195 if not different:
195 if not different:
196 mresult.addfile(
196 mresult.addfile(
197 f,
197 f,
198 mergestatemod.ACTION_GET,
198 mergestatemod.ACTION_GET,
199 (fl2, False),
199 (fl2, False),
200 b'remote created',
200 b'remote created',
201 )
201 )
202 elif mergeforce or config == b'abort':
202 elif mergeforce or config == b'abort':
203 mresult.addfile(
203 mresult.addfile(
204 f,
204 f,
205 mergestatemod.ACTION_MERGE,
205 mergestatemod.ACTION_MERGE,
206 (f, f, None, False, anc),
206 (f, f, None, False, anc),
207 b'remote differs from untracked local',
207 b'remote differs from untracked local',
208 )
208 )
209 elif config == b'abort':
209 elif config == b'abort':
210 abortconflicts.add(f)
210 abortconflicts.add(f)
211 else:
211 else:
212 if config == b'warn':
212 if config == b'warn':
213 warnconflicts.add(f)
213 warnconflicts.add(f)
214 mresult.addfile(
214 mresult.addfile(
215 f,
215 f,
216 mergestatemod.ACTION_GET,
216 mergestatemod.ACTION_GET,
217 (fl2, True),
217 (fl2, True),
218 b'remote created',
218 b'remote created',
219 )
219 )
220
220
221 for f in sorted(abortconflicts):
221 for f in sorted(abortconflicts):
222 warn = repo.ui.warn
222 warn = repo.ui.warn
223 if f in pathconflicts:
223 if f in pathconflicts:
224 if repo.wvfs.isfileorlink(f):
224 if repo.wvfs.isfileorlink(f):
225 warn(_(b"%s: untracked file conflicts with directory\n") % f)
225 warn(_(b"%s: untracked file conflicts with directory\n") % f)
226 else:
226 else:
227 warn(_(b"%s: untracked directory conflicts with file\n") % f)
227 warn(_(b"%s: untracked directory conflicts with file\n") % f)
228 else:
228 else:
229 warn(_(b"%s: untracked file differs\n") % f)
229 warn(_(b"%s: untracked file differs\n") % f)
230 if abortconflicts:
230 if abortconflicts:
231 raise error.StateError(
231 raise error.StateError(
232 _(
232 _(
233 b"untracked files in working directory "
233 b"untracked files in working directory "
234 b"differ from files in requested revision"
234 b"differ from files in requested revision"
235 )
235 )
236 )
236 )
237
237
238 for f in sorted(warnconflicts):
238 for f in sorted(warnconflicts):
239 if repo.wvfs.isfileorlink(f):
239 if repo.wvfs.isfileorlink(f):
240 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
240 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
241 else:
241 else:
242 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
242 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
243
243
244 for f, args, msg in list(
244 for f, args, msg in list(
245 mresult.getactions([mergestatemod.ACTION_CREATED])
245 mresult.getactions([mergestatemod.ACTION_CREATED])
246 ):
246 ):
247 backup = (
247 backup = (
248 f in fileconflicts
248 f in fileconflicts
249 or f in pathconflicts
249 or f in pathconflicts
250 or any(p in pathconflicts for p in pathutil.finddirs(f))
250 or any(p in pathconflicts for p in pathutil.finddirs(f))
251 )
251 )
252 (flags,) = args
252 (flags,) = args
253 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
253 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
254
254
255
255
def _forgetremoved(wctx, mctx, branchmerge, mresult):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """
    # merges mark missing files as removed; plain updates just forget them
    if branchmerge:
        action = mergestatemod.ACTION_REMOVE
    else:
        action = mergestatemod.ACTION_FORGET

    for fname in wctx.deleted():
        if fname not in mctx:
            mresult.addfile(fname, action, None, b"forget deleted")

    if not branchmerge:
        for fname in wctx.removed():
            if fname not in mctx:
                mresult.addfile(
                    fname,
                    mergestatemod.ACTION_FORGET,
                    None,
                    b"forget removed",
                )
287
287
288
288
def _checkcollision(repo, wmf, mresult):
    """
    Check for case-folding collisions.

    Builds a provisional post-merge manifest from ``wmf`` plus the
    pending actions in ``mresult`` and raises error.StateError if two
    names (or a name and a directory) would collide after case folding.
    """
    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if narrowmatch.always():
        # build provisional merged manifest up
        provisional = set(wmf)
    else:
        provisional = set(wmf.walk(narrowmatch))
        if mresult:
            for fname in list(mresult.files()):
                if not narrowmatch(fname):
                    mresult.removefile(fname)

    if mresult:
        # KEEP and EXEC are no-op
        additive_actions = (
            mergestatemod.ACTION_ADD,
            mergestatemod.ACTION_ADD_MODIFIED,
            mergestatemod.ACTION_FORGET,
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_CHANGED_DELETED,
            mergestatemod.ACTION_DELETED_CHANGED,
        )
        for fname in mresult.files(additive_actions):
            provisional.add(fname)
        for fname in mresult.files((mergestatemod.ACTION_REMOVE,)):
            provisional.discard(fname)
        for fname, args, msg in mresult.getactions(
            [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
        ):
            moved, flags = args
            provisional.discard(moved)
            provisional.add(fname)
        for fname in mresult.files(
            (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)
        ):
            provisional.add(fname)
        for fname, args, msg in mresult.getactions(
            [mergestatemod.ACTION_MERGE]
        ):
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(fname)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for fname in provisional:
        folded = util.normcase(fname)
        if folded in foldmap:
            msg = _(b"case-folding collision between %s and %s")
            msg %= (fname, foldmap[folded])
            raise error.StateError(msg)
        foldmap[folded] = fname

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = b''
    for folded, fname in sorted(foldmap.items()):
        if folded.startswith(foldprefix) and not fname.startswith(
            unfoldprefix
        ):
            # the folded prefix matches but actual casing is different
            msg = _(b"case-folding collision between %s and directory of %s")
            msg %= (lastfull, fname)
            raise error.StateError(msg)
        foldprefix = folded + b'/'
        unfoldprefix = fname + b'/'
        lastfull = fname
355
355
356
356
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for fname in manifest:
        # report only the first ancestor directory that is in ``dirs``
        containing = next(
            (d for d in pathutil.finddirs(fname) if d in dirs), None
        )
        if containing is not None:
            yield fname, containing
368
368
369
369
def checkpathconflicts(repo, wctx, mctx, mresult):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.

    repo: repository being updated/merged into
    wctx: local (working) context
    mctx: remote context being merged in
    mresult: mergeresult object whose actions are inspected and amended
             in place

    Raises error.StateError when the remote manifest is internally
    inconsistent (the same path appears as both a file and a directory).
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f in mresult.files(
        (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        )
    ):
        # This action may create a new local file.
        createdfiledirs.update(pathutil.finddirs(f))
        if mf.hasdir(f):
            # The file aliases a local directory. This might be ok if all
            # the files in the local directory are being deleted. This
            # will be checked once we know what all the deleted files are.
            remoteconflicts.add(f)
    # Track the names of all deleted files.
    for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
        deletedfiles.add(f)
    # A merge with a rename (move) also deletes the rename source.
    for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
        f1, f2, fa, move, anc = args
        if move:
            deletedfiles.add(f1)
    # Directory renames delete the original (remote-named) file as well.
    for (f, args, msg) in mresult.getactions(
        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
    ):
        f2, flags = args
        deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        # Also check whether the merge itself creates ``p`` as a file
        # while another action creates files inside it as a directory.
        pd = mresult.getfile(p)
        if pd and pd[0] in (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
            porig = wctx[p].copysource() or p
            mresult.addfile(
                pnew,
                mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                (p, porig),
                b'local path conflict',
            )
            mresult.addfile(
                p,
                mergestatemod.ACTION_PATH_CONFLICT,
                (pnew, b'l'),
                b'path conflict',
            )

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = mresult.getfile(p)
                pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
                if m in (
                    mergestatemod.ACTION_DELETED_CHANGED,
                    mergestatemod.ACTION_MERGE,
                ):
                    # Action was merge, just update target.
                    mresult.addfile(pnew, m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    mresult.addfile(
                        pnew,
                        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                mresult.addfile(
                    p,
                    mergestatemod.ACTION_PATH_CONFLICT,
                    (pnew, b'r'),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                # NOTE(review): only the first surviving file under a
                # conflicting directory is processed per call — the loop
                # breaks here, presumably because renaming one conflict
                # invalidates the remaining iteration state. TODO confirm.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.StateError(
            _(b"destination manifest contains path conflicts")
        )
503
503
504
504
def _filternarrowactions(narrowmatch, branchmerge, mresult):
    """
    Filters out actions that can ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.

    narrowmatch: matcher selecting paths inside the narrowspec
    branchmerge: True for a real merge, False for a plain update
    mresult: mergeresult object; offending actions are removed in place

    Raises error.Abort for a non-conflicting action outside the narrowspec
    during a merge (not yet supported), and error.StateError for a
    conflicting one.
    """
    # TODO: handle with nonconflicttypes
    nonconflicttypes = {
        mergestatemod.ACTION_ADD,
        mergestatemod.ACTION_ADD_MODIFIED,
        mergestatemod.ACTION_CREATED,
        mergestatemod.ACTION_CREATED_MERGE,
        mergestatemod.ACTION_FORGET,
        mergestatemod.ACTION_GET,
        mergestatemod.ACTION_REMOVE,
        mergestatemod.ACTION_EXEC,
    }
    # We call removefile() while iterating, so snapshot the file map
    # explicitly with list() — filemap() is a generator over the live
    # mapping and does not guarantee a copy by itself.
    for f, action in list(mresult.filemap()):
        if narrowmatch(f):
            pass
        elif not branchmerge:
            mresult.removefile(f)  # just updating, ignore changes outside clone
        elif action[0] in mergestatemod.NO_OP_ACTIONS:
            mresult.removefile(f)  # merge does not affect file
        elif action[0] in nonconflicttypes:
            msg = _(
                b'merge affects file \'%s\' outside narrow, '
                b'which is not yet supported'
            )
            hint = _(b'merging in the other direction may work')
            raise error.Abort(msg % f, hint=hint)
        else:
            msg = _(b'conflict in file \'%s\' is outside narrow clone')
            raise error.StateError(msg % f)
542
542
543
543
class mergeresult(object):
    """An object representing result of merging manifests.

    It has information about what actions need to be performed on dirstate
    mapping of divergent renames and other such cases."""

    def __init__(self):
        """
        filemapping: dict of filename as keys and action related info as values
        diverge: mapping of source name -> list of dest name for
                 divergent renames
        renamedelete: mapping of source name -> list of destinations for files
                      deleted on one side and renamed on other.
        commitinfo: dict containing data which should be used on commit
                    contains a filename -> info mapping
        actionmapping: dict of action names as keys and values are dict of
                       filename as key and related data as values
        """
        self._filemapping = {}
        self._diverge = {}
        self._renamedelete = {}
        self._commitinfo = collections.defaultdict(dict)
        self._actionmapping = collections.defaultdict(dict)

    def updatevalues(self, diverge, renamedelete):
        """replaces the divergent-rename and rename-delete mappings"""
        self._diverge = diverge
        self._renamedelete = renamedelete

    def addfile(self, filename, action, data, message):
        """adds a new file to the mergeresult object

        filename: file which we are adding
        action: one of mergestatemod.ACTION_*
        data: a tuple of information like fctx and ctx related to this merge
        message: a message about the merge
        """
        # if the file already existed, we need to delete its old
        # entry from _actionmapping too
        if filename in self._filemapping:
            a, d, m = self._filemapping[filename]
            del self._actionmapping[a][filename]

        self._filemapping[filename] = (action, data, message)
        self._actionmapping[action][filename] = (data, message)

    def getfile(self, filename, default_return=None):
        """returns (action, args, msg) about this file

        returns default_return if the file is not present"""
        if filename in self._filemapping:
            return self._filemapping[filename]
        return default_return

    def files(self, actions=None):
        """returns files on which provided actions need to be performed

        If actions is None, all files are returned
        """
        # TODO: think whether we should return renamedelete and
        # diverge filenames also
        if actions is None:
            for f in self._filemapping:
                yield f

        else:
            for a in actions:
                for f in self._actionmapping[a]:
                    yield f

    def removefile(self, filename):
        """removes a file from the mergeresult object as the file might
        not merging anymore"""
        action, data, message = self._filemapping[filename]
        del self._filemapping[filename]
        del self._actionmapping[action][filename]

    def getactions(self, actions, sort=False):
        """get list of files which are marked with these actions
        if sort is true, files for each action is sorted and then added

        Returns a list of tuple of form (filename, data, message)
        """
        for a in actions:
            if sort:
                for f in sorted(self._actionmapping[a]):
                    args, msg = self._actionmapping[a][f]
                    yield f, args, msg
            else:
                for f, (args, msg) in pycompat.iteritems(
                    self._actionmapping[a]
                ):
                    yield f, args, msg

    def len(self, actions=None):
        """returns number of files which needs actions

        if actions is passed, total of number of files in that action
        only is returned"""

        if actions is None:
            return len(self._filemapping)

        return sum(len(self._actionmapping[a]) for a in actions)

    def filemap(self, sort=False):
        """yields (filename, (action, data, message)) pairs

        If ``sort`` is True, pairs are yielded in sorted filename order.
        The mapping is snapshotted before yielding, so callers may add or
        remove files while iterating.
        """
        # BUG FIX: this previously tested ``if sorted:`` — the builtin
        # function, which is always truthy — instead of the ``sort``
        # parameter, so the flag was dead and output was always sorted.
        items = pycompat.iteritems(self._filemapping)
        if sort:
            items = sorted(items)
        else:
            # materialize so callers can safely mutate us mid-iteration
            items = list(items)
        for key, val in items:
            yield key, val

    def addcommitinfo(self, filename, key, value):
        """adds key-value information about filename which will be required
        while committing this merge"""
        self._commitinfo[filename][key] = value

    @property
    def diverge(self):
        return self._diverge

    @property
    def renamedelete(self):
        return self._renamedelete

    @property
    def commitinfo(self):
        return self._commitinfo

    @property
    def actionsdict(self):
        """returns a dictionary of actions to be performed with action as key
        and a list of files and related arguments as values"""
        res = collections.defaultdict(list)
        for a, d in pycompat.iteritems(self._actionmapping):
            for f, (args, msg) in pycompat.iteritems(d):
                res[a].append((f, args, msg))
        return res

    def setactions(self, actions):
        """replaces the whole filename -> (action, data, message) mapping
        and rebuilds the per-action index from it"""
        self._filemapping = actions
        self._actionmapping = collections.defaultdict(dict)
        for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
            self._actionmapping[act][f] = data, msg

    def hasconflicts(self):
        """tells whether this merge resulted in some actions which can
        result in conflicts or not"""
        for a in self._actionmapping.keys():
            if (
                a
                not in (
                    mergestatemod.ACTION_GET,
                    mergestatemod.ACTION_EXEC,
                    mergestatemod.ACTION_REMOVE,
                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                )
                and self._actionmapping[a]
                and a not in mergestatemod.NO_OP_ACTIONS
            ):
                return True

        return False
707
707
708
708
709 def manifestmerge(
709 def manifestmerge(
710 repo,
710 repo,
711 wctx,
711 wctx,
712 p2,
712 p2,
713 pa,
713 pa,
714 branchmerge,
714 branchmerge,
715 force,
715 force,
716 matcher,
716 matcher,
717 acceptremote,
717 acceptremote,
718 followcopies,
718 followcopies,
719 forcefulldiff=False,
719 forcefulldiff=False,
720 ):
720 ):
721 """
721 """
722 Merge wctx and p2 with ancestor pa and generate merge action list
722 Merge wctx and p2 with ancestor pa and generate merge action list
723
723
724 branchmerge and force are as passed in to update
724 branchmerge and force are as passed in to update
725 matcher = matcher to filter file lists
725 matcher = matcher to filter file lists
726 acceptremote = accept the incoming changes without prompting
726 acceptremote = accept the incoming changes without prompting
727
727
728 Returns an object of mergeresult class
728 Returns an object of mergeresult class
729 """
729 """
730 mresult = mergeresult()
730 mresult = mergeresult()
731 if matcher is not None and matcher.always():
731 if matcher is not None and matcher.always():
732 matcher = None
732 matcher = None
733
733
734 # manifests fetched in order are going to be faster, so prime the caches
734 # manifests fetched in order are going to be faster, so prime the caches
735 [
735 [
736 x.manifest()
736 x.manifest()
737 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
737 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
738 ]
738 ]
739
739
740 branch_copies1 = copies.branch_copies()
740 branch_copies1 = copies.branch_copies()
741 branch_copies2 = copies.branch_copies()
741 branch_copies2 = copies.branch_copies()
742 diverge = {}
742 diverge = {}
743 # information from merge which is needed at commit time
743 # information from merge which is needed at commit time
744 # for example choosing filelog of which parent to commit
744 # for example choosing filelog of which parent to commit
745 # TODO: use specific constants in future for this mapping
745 # TODO: use specific constants in future for this mapping
746 if followcopies:
746 if followcopies:
747 branch_copies1, branch_copies2, diverge = copies.mergecopies(
747 branch_copies1, branch_copies2, diverge = copies.mergecopies(
748 repo, wctx, p2, pa
748 repo, wctx, p2, pa
749 )
749 )
750
750
751 boolbm = pycompat.bytestr(bool(branchmerge))
751 boolbm = pycompat.bytestr(bool(branchmerge))
752 boolf = pycompat.bytestr(bool(force))
752 boolf = pycompat.bytestr(bool(force))
753 boolm = pycompat.bytestr(bool(matcher))
753 boolm = pycompat.bytestr(bool(matcher))
754 repo.ui.note(_(b"resolving manifests\n"))
754 repo.ui.note(_(b"resolving manifests\n"))
755 repo.ui.debug(
755 repo.ui.debug(
756 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
756 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
757 )
757 )
758 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
758 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
759
759
760 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
760 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
761 copied1 = set(branch_copies1.copy.values())
761 copied1 = set(branch_copies1.copy.values())
762 copied1.update(branch_copies1.movewithdir.values())
762 copied1.update(branch_copies1.movewithdir.values())
763 copied2 = set(branch_copies2.copy.values())
763 copied2 = set(branch_copies2.copy.values())
764 copied2.update(branch_copies2.movewithdir.values())
764 copied2.update(branch_copies2.movewithdir.values())
765
765
766 if b'.hgsubstate' in m1 and wctx.rev() is None:
766 if b'.hgsubstate' in m1 and wctx.rev() is None:
767 # Check whether sub state is modified, and overwrite the manifest
767 # Check whether sub state is modified, and overwrite the manifest
768 # to flag the change. If wctx is a committed revision, we shouldn't
768 # to flag the change. If wctx is a committed revision, we shouldn't
769 # care for the dirty state of the working directory.
769 # care for the dirty state of the working directory.
770 if any(wctx.sub(s).dirty() for s in wctx.substate):
770 if any(wctx.sub(s).dirty() for s in wctx.substate):
771 m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
771 m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
772
772
773 # Don't use m2-vs-ma optimization if:
773 # Don't use m2-vs-ma optimization if:
774 # - ma is the same as m1 or m2, which we're just going to diff again later
774 # - ma is the same as m1 or m2, which we're just going to diff again later
775 # - The caller specifically asks for a full diff, which is useful during bid
775 # - The caller specifically asks for a full diff, which is useful during bid
776 # merge.
776 # merge.
777 # - we are tracking salvaged files specifically hence should process all
777 # - we are tracking salvaged files specifically hence should process all
778 # files
778 # files
779 if (
779 if (
780 pa not in ([wctx, p2] + wctx.parents())
780 pa not in ([wctx, p2] + wctx.parents())
781 and not forcefulldiff
781 and not forcefulldiff
782 and not (
782 and not (
783 repo.ui.configbool(b'experimental', b'merge-track-salvaged')
783 repo.ui.configbool(b'experimental', b'merge-track-salvaged')
784 or repo.filecopiesmode == b'changeset-sidedata'
784 or repo.filecopiesmode == b'changeset-sidedata'
785 )
785 )
786 ):
786 ):
787 # Identify which files are relevant to the merge, so we can limit the
787 # Identify which files are relevant to the merge, so we can limit the
788 # total m1-vs-m2 diff to just those files. This has significant
788 # total m1-vs-m2 diff to just those files. This has significant
789 # performance benefits in large repositories.
789 # performance benefits in large repositories.
790 relevantfiles = set(ma.diff(m2).keys())
790 relevantfiles = set(ma.diff(m2).keys())
791
791
792 # For copied and moved files, we need to add the source file too.
792 # For copied and moved files, we need to add the source file too.
793 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
793 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
794 if copyvalue in relevantfiles:
794 if copyvalue in relevantfiles:
795 relevantfiles.add(copykey)
795 relevantfiles.add(copykey)
796 for movedirkey in branch_copies1.movewithdir:
796 for movedirkey in branch_copies1.movewithdir:
797 relevantfiles.add(movedirkey)
797 relevantfiles.add(movedirkey)
798 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
798 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
799 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
799 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
800
800
801 diff = m1.diff(m2, match=matcher)
801 diff = m1.diff(m2, match=matcher)
802
802
803 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
803 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
804 if n1 and n2: # file exists on both local and remote side
804 if n1 and n2: # file exists on both local and remote side
805 if f not in ma:
805 if f not in ma:
806 # TODO: what if they're renamed from different sources?
806 # TODO: what if they're renamed from different sources?
807 fa = branch_copies1.copy.get(
807 fa = branch_copies1.copy.get(
808 f, None
808 f, None
809 ) or branch_copies2.copy.get(f, None)
809 ) or branch_copies2.copy.get(f, None)
810 args, msg = None, None
810 args, msg = None, None
811 if fa is not None:
811 if fa is not None:
812 args = (f, f, fa, False, pa.node())
812 args = (f, f, fa, False, pa.node())
813 msg = b'both renamed from %s' % fa
813 msg = b'both renamed from %s' % fa
814 else:
814 else:
815 args = (f, f, None, False, pa.node())
815 args = (f, f, None, False, pa.node())
816 msg = b'both created'
816 msg = b'both created'
817 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
817 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
818 elif f in branch_copies1.copy:
818 elif f in branch_copies1.copy:
819 fa = branch_copies1.copy[f]
819 fa = branch_copies1.copy[f]
820 mresult.addfile(
820 mresult.addfile(
821 f,
821 f,
822 mergestatemod.ACTION_MERGE,
822 mergestatemod.ACTION_MERGE,
823 (f, fa, fa, False, pa.node()),
823 (f, fa, fa, False, pa.node()),
824 b'local replaced from %s' % fa,
824 b'local replaced from %s' % fa,
825 )
825 )
826 elif f in branch_copies2.copy:
826 elif f in branch_copies2.copy:
827 fa = branch_copies2.copy[f]
827 fa = branch_copies2.copy[f]
828 mresult.addfile(
828 mresult.addfile(
829 f,
829 f,
830 mergestatemod.ACTION_MERGE,
830 mergestatemod.ACTION_MERGE,
831 (fa, f, fa, False, pa.node()),
831 (fa, f, fa, False, pa.node()),
832 b'other replaced from %s' % fa,
832 b'other replaced from %s' % fa,
833 )
833 )
834 else:
834 else:
835 a = ma[f]
835 a = ma[f]
836 fla = ma.flags(f)
836 fla = ma.flags(f)
837 nol = b'l' not in fl1 + fl2 + fla
837 nol = b'l' not in fl1 + fl2 + fla
838 if n2 == a and fl2 == fla:
838 if n2 == a and fl2 == fla:
839 mresult.addfile(
839 mresult.addfile(
840 f,
840 f,
841 mergestatemod.ACTION_KEEP,
841 mergestatemod.ACTION_KEEP,
842 (),
842 (),
843 b'remote unchanged',
843 b'remote unchanged',
844 )
844 )
845 elif n1 == a and fl1 == fla: # local unchanged - use remote
845 elif n1 == a and fl1 == fla: # local unchanged - use remote
846 if n1 == n2: # optimization: keep local content
846 if n1 == n2: # optimization: keep local content
847 mresult.addfile(
847 mresult.addfile(
848 f,
848 f,
849 mergestatemod.ACTION_EXEC,
849 mergestatemod.ACTION_EXEC,
850 (fl2,),
850 (fl2,),
851 b'update permissions',
851 b'update permissions',
852 )
852 )
853 else:
853 else:
854 mresult.addfile(
854 mresult.addfile(
855 f,
855 f,
856 mergestatemod.ACTION_GET,
856 mergestatemod.ACTION_GET,
857 (fl2, False),
857 (fl2, False),
858 b'remote is newer',
858 b'remote is newer',
859 )
859 )
860 if branchmerge:
860 if branchmerge:
861 mresult.addcommitinfo(
861 mresult.addcommitinfo(
862 f, b'filenode-source', b'other'
862 f, b'filenode-source', b'other'
863 )
863 )
864 elif nol and n2 == a: # remote only changed 'x'
864 elif nol and n2 == a: # remote only changed 'x'
865 mresult.addfile(
865 mresult.addfile(
866 f,
866 f,
867 mergestatemod.ACTION_EXEC,
867 mergestatemod.ACTION_EXEC,
868 (fl2,),
868 (fl2,),
869 b'update permissions',
869 b'update permissions',
870 )
870 )
871 elif nol and n1 == a: # local only changed 'x'
871 elif nol and n1 == a: # local only changed 'x'
872 mresult.addfile(
872 mresult.addfile(
873 f,
873 f,
874 mergestatemod.ACTION_GET,
874 mergestatemod.ACTION_GET,
875 (fl1, False),
875 (fl1, False),
876 b'remote is newer',
876 b'remote is newer',
877 )
877 )
878 if branchmerge:
878 if branchmerge:
879 mresult.addcommitinfo(f, b'filenode-source', b'other')
879 mresult.addcommitinfo(f, b'filenode-source', b'other')
880 else: # both changed something
880 else: # both changed something
881 mresult.addfile(
881 mresult.addfile(
882 f,
882 f,
883 mergestatemod.ACTION_MERGE,
883 mergestatemod.ACTION_MERGE,
884 (f, f, f, False, pa.node()),
884 (f, f, f, False, pa.node()),
885 b'versions differ',
885 b'versions differ',
886 )
886 )
887 elif n1: # file exists only on local side
887 elif n1: # file exists only on local side
888 if f in copied2:
888 if f in copied2:
889 pass # we'll deal with it on m2 side
889 pass # we'll deal with it on m2 side
890 elif (
890 elif (
891 f in branch_copies1.movewithdir
891 f in branch_copies1.movewithdir
892 ): # directory rename, move local
892 ): # directory rename, move local
893 f2 = branch_copies1.movewithdir[f]
893 f2 = branch_copies1.movewithdir[f]
894 if f2 in m2:
894 if f2 in m2:
895 mresult.addfile(
895 mresult.addfile(
896 f2,
896 f2,
897 mergestatemod.ACTION_MERGE,
897 mergestatemod.ACTION_MERGE,
898 (f, f2, None, True, pa.node()),
898 (f, f2, None, True, pa.node()),
899 b'remote directory rename, both created',
899 b'remote directory rename, both created',
900 )
900 )
901 else:
901 else:
902 mresult.addfile(
902 mresult.addfile(
903 f2,
903 f2,
904 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
904 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
905 (f, fl1),
905 (f, fl1),
906 b'remote directory rename - move from %s' % f,
906 b'remote directory rename - move from %s' % f,
907 )
907 )
908 elif f in branch_copies1.copy:
908 elif f in branch_copies1.copy:
909 f2 = branch_copies1.copy[f]
909 f2 = branch_copies1.copy[f]
910 mresult.addfile(
910 mresult.addfile(
911 f,
911 f,
912 mergestatemod.ACTION_MERGE,
912 mergestatemod.ACTION_MERGE,
913 (f, f2, f2, False, pa.node()),
913 (f, f2, f2, False, pa.node()),
914 b'local copied/moved from %s' % f2,
914 b'local copied/moved from %s' % f2,
915 )
915 )
916 elif f in ma: # clean, a different, no remote
916 elif f in ma: # clean, a different, no remote
917 if n1 != ma[f]:
917 if n1 != ma[f]:
918 if acceptremote:
918 if acceptremote:
919 mresult.addfile(
919 mresult.addfile(
920 f,
920 f,
921 mergestatemod.ACTION_REMOVE,
921 mergestatemod.ACTION_REMOVE,
922 None,
922 None,
923 b'remote delete',
923 b'remote delete',
924 )
924 )
925 else:
925 else:
926 mresult.addfile(
926 mresult.addfile(
927 f,
927 f,
928 mergestatemod.ACTION_CHANGED_DELETED,
928 mergestatemod.ACTION_CHANGED_DELETED,
929 (f, None, f, False, pa.node()),
929 (f, None, f, False, pa.node()),
930 b'prompt changed/deleted',
930 b'prompt changed/deleted',
931 )
931 )
932 if branchmerge:
932 if branchmerge:
933 mresult.addcommitinfo(
933 mresult.addcommitinfo(
934 f, b'merge-removal-candidate', b'yes'
934 f, b'merge-removal-candidate', b'yes'
935 )
935 )
936 elif n1 == repo.nodeconstants.addednodeid:
936 elif n1 == repo.nodeconstants.addednodeid:
937 # This file was locally added. We should forget it instead of
937 # This file was locally added. We should forget it instead of
938 # deleting it.
938 # deleting it.
939 mresult.addfile(
939 mresult.addfile(
940 f,
940 f,
941 mergestatemod.ACTION_FORGET,
941 mergestatemod.ACTION_FORGET,
942 None,
942 None,
943 b'remote deleted',
943 b'remote deleted',
944 )
944 )
945 else:
945 else:
946 mresult.addfile(
946 mresult.addfile(
947 f,
947 f,
948 mergestatemod.ACTION_REMOVE,
948 mergestatemod.ACTION_REMOVE,
949 None,
949 None,
950 b'other deleted',
950 b'other deleted',
951 )
951 )
952 if branchmerge:
952 if branchmerge:
953 # the file must be absent after merging,
953 # the file must be absent after merging,
954 # howeber the user might make
954 # howeber the user might make
955 # the file reappear using revert and if they does,
955 # the file reappear using revert and if they does,
956 # we force create a new node
956 # we force create a new node
957 mresult.addcommitinfo(
957 mresult.addcommitinfo(
958 f, b'merge-removal-candidate', b'yes'
958 f, b'merge-removal-candidate', b'yes'
959 )
959 )
960
960
961 else: # file not in ancestor, not in remote
961 else: # file not in ancestor, not in remote
962 mresult.addfile(
962 mresult.addfile(
963 f,
963 f,
964 mergestatemod.ACTION_KEEP_NEW,
964 mergestatemod.ACTION_KEEP_NEW,
965 None,
965 None,
966 b'ancestor missing, remote missing',
966 b'ancestor missing, remote missing',
967 )
967 )
968
968
969 elif n2: # file exists only on remote side
969 elif n2: # file exists only on remote side
970 if f in copied1:
970 if f in copied1:
971 pass # we'll deal with it on m1 side
971 pass # we'll deal with it on m1 side
972 elif f in branch_copies2.movewithdir:
972 elif f in branch_copies2.movewithdir:
973 f2 = branch_copies2.movewithdir[f]
973 f2 = branch_copies2.movewithdir[f]
974 if f2 in m1:
974 if f2 in m1:
975 mresult.addfile(
975 mresult.addfile(
976 f2,
976 f2,
977 mergestatemod.ACTION_MERGE,
977 mergestatemod.ACTION_MERGE,
978 (f2, f, None, False, pa.node()),
978 (f2, f, None, False, pa.node()),
979 b'local directory rename, both created',
979 b'local directory rename, both created',
980 )
980 )
981 else:
981 else:
982 mresult.addfile(
982 mresult.addfile(
983 f2,
983 f2,
984 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
984 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
985 (f, fl2),
985 (f, fl2),
986 b'local directory rename - get from %s' % f,
986 b'local directory rename - get from %s' % f,
987 )
987 )
988 elif f in branch_copies2.copy:
988 elif f in branch_copies2.copy:
989 f2 = branch_copies2.copy[f]
989 f2 = branch_copies2.copy[f]
990 msg, args = None, None
990 msg, args = None, None
991 if f2 in m2:
991 if f2 in m2:
992 args = (f2, f, f2, False, pa.node())
992 args = (f2, f, f2, False, pa.node())
993 msg = b'remote copied from %s' % f2
993 msg = b'remote copied from %s' % f2
994 else:
994 else:
995 args = (f2, f, f2, True, pa.node())
995 args = (f2, f, f2, True, pa.node())
996 msg = b'remote moved from %s' % f2
996 msg = b'remote moved from %s' % f2
997 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
997 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
998 elif f not in ma:
998 elif f not in ma:
999 # local unknown, remote created: the logic is described by the
999 # local unknown, remote created: the logic is described by the
1000 # following table:
1000 # following table:
1001 #
1001 #
1002 # force branchmerge different | action
1002 # force branchmerge different | action
1003 # n * * | create
1003 # n * * | create
1004 # y n * | create
1004 # y n * | create
1005 # y y n | create
1005 # y y n | create
1006 # y y y | merge
1006 # y y y | merge
1007 #
1007 #
1008 # Checking whether the files are different is expensive, so we
1008 # Checking whether the files are different is expensive, so we
1009 # don't do that when we can avoid it.
1009 # don't do that when we can avoid it.
1010 if not force:
1010 if not force:
1011 mresult.addfile(
1011 mresult.addfile(
1012 f,
1012 f,
1013 mergestatemod.ACTION_CREATED,
1013 mergestatemod.ACTION_CREATED,
1014 (fl2,),
1014 (fl2,),
1015 b'remote created',
1015 b'remote created',
1016 )
1016 )
1017 elif not branchmerge:
1017 elif not branchmerge:
1018 mresult.addfile(
1018 mresult.addfile(
1019 f,
1019 f,
1020 mergestatemod.ACTION_CREATED,
1020 mergestatemod.ACTION_CREATED,
1021 (fl2,),
1021 (fl2,),
1022 b'remote created',
1022 b'remote created',
1023 )
1023 )
1024 else:
1024 else:
1025 mresult.addfile(
1025 mresult.addfile(
1026 f,
1026 f,
1027 mergestatemod.ACTION_CREATED_MERGE,
1027 mergestatemod.ACTION_CREATED_MERGE,
1028 (fl2, pa.node()),
1028 (fl2, pa.node()),
1029 b'remote created, get or merge',
1029 b'remote created, get or merge',
1030 )
1030 )
1031 elif n2 != ma[f]:
1031 elif n2 != ma[f]:
1032 df = None
1032 df = None
1033 for d in branch_copies1.dirmove:
1033 for d in branch_copies1.dirmove:
1034 if f.startswith(d):
1034 if f.startswith(d):
1035 # new file added in a directory that was moved
1035 # new file added in a directory that was moved
1036 df = branch_copies1.dirmove[d] + f[len(d) :]
1036 df = branch_copies1.dirmove[d] + f[len(d) :]
1037 break
1037 break
1038 if df is not None and df in m1:
1038 if df is not None and df in m1:
1039 mresult.addfile(
1039 mresult.addfile(
1040 df,
1040 df,
1041 mergestatemod.ACTION_MERGE,
1041 mergestatemod.ACTION_MERGE,
1042 (df, f, f, False, pa.node()),
1042 (df, f, f, False, pa.node()),
1043 b'local directory rename - respect move '
1043 b'local directory rename - respect move '
1044 b'from %s' % f,
1044 b'from %s' % f,
1045 )
1045 )
1046 elif acceptremote:
1046 elif acceptremote:
1047 mresult.addfile(
1047 mresult.addfile(
1048 f,
1048 f,
1049 mergestatemod.ACTION_CREATED,
1049 mergestatemod.ACTION_CREATED,
1050 (fl2,),
1050 (fl2,),
1051 b'remote recreating',
1051 b'remote recreating',
1052 )
1052 )
1053 else:
1053 else:
1054 mresult.addfile(
1054 mresult.addfile(
1055 f,
1055 f,
1056 mergestatemod.ACTION_DELETED_CHANGED,
1056 mergestatemod.ACTION_DELETED_CHANGED,
1057 (None, f, f, False, pa.node()),
1057 (None, f, f, False, pa.node()),
1058 b'prompt deleted/changed',
1058 b'prompt deleted/changed',
1059 )
1059 )
1060 if branchmerge:
1060 if branchmerge:
1061 mresult.addcommitinfo(
1061 mresult.addcommitinfo(
1062 f, b'merge-removal-candidate', b'yes'
1062 f, b'merge-removal-candidate', b'yes'
1063 )
1063 )
1064 else:
1064 else:
1065 mresult.addfile(
1065 mresult.addfile(
1066 f,
1066 f,
1067 mergestatemod.ACTION_KEEP_ABSENT,
1067 mergestatemod.ACTION_KEEP_ABSENT,
1068 None,
1068 None,
1069 b'local not present, remote unchanged',
1069 b'local not present, remote unchanged',
1070 )
1070 )
1071 if branchmerge:
1071 if branchmerge:
1072 # the file must be absent after merging
1072 # the file must be absent after merging
1073 # however the user might make
1073 # however the user might make
1074 # the file reappear using revert and if they does,
1074 # the file reappear using revert and if they does,
1075 # we force create a new node
1075 # we force create a new node
1076 mresult.addcommitinfo(f, b'merge-removal-candidate', b'yes')
1076 mresult.addcommitinfo(f, b'merge-removal-candidate', b'yes')
1077
1077
1078 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1078 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1079 # If we are merging, look for path conflicts.
1079 # If we are merging, look for path conflicts.
1080 checkpathconflicts(repo, wctx, p2, mresult)
1080 checkpathconflicts(repo, wctx, p2, mresult)
1081
1081
1082 narrowmatch = repo.narrowmatch()
1082 narrowmatch = repo.narrowmatch()
1083 if not narrowmatch.always():
1083 if not narrowmatch.always():
1084 # Updates "actions" in place
1084 # Updates "actions" in place
1085 _filternarrowactions(narrowmatch, branchmerge, mresult)
1085 _filternarrowactions(narrowmatch, branchmerge, mresult)
1086
1086
1087 renamedelete = branch_copies1.renamedelete
1087 renamedelete = branch_copies1.renamedelete
1088 renamedelete.update(branch_copies2.renamedelete)
1088 renamedelete.update(branch_copies2.renamedelete)
1089
1089
1090 mresult.updatevalues(diverge, renamedelete)
1090 mresult.updatevalues(diverge, renamedelete)
1091 return mresult
1091 return mresult
1092
1092
1093
1093
def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
    """Drop false conflicts whose nodeid differs but whose content matches.

    A changed/deleted (or deleted/changed) entry is only a real conflict if
    the surviving side's content actually diverged from the ancestor.  When
    it did not, resolve the entry automatically in favor of the deletion.
    """
    # Snapshot the file lists up front: we mutate mresult while iterating.
    cd_files = list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,)))
    for path in cd_files:
        if path not in ancestor:
            continue
        if wctx[path].cmp(ancestor[path]):
            continue
        # Local touched the file but its content matches the ancestor, so
        # honor the remote deletion.
        mresult.addfile(
            path, mergestatemod.ACTION_REMOVE, None, b'prompt same'
        )

    dc_files = list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,)))
    for path in dc_files:
        if path in ancestor and not mctx[path].cmp(ancestor[path]):
            # Remote touched the file but content matches the ancestor;
            # dropping the action keeps the local deletion in place.
            mresult.removefile(path)
1110
1110
1111
1111
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """
    Calculate the actions needed to merge mctx into wctx using ancestors

    Uses manifestmerge() to merge manifest and get list of actions required to
    perform for merging two manifests. If there are multiple ancestors, uses bid
    merge if enabled: each ancestor produces one independent set of "bids"
    (candidate actions per file) and an auction below picks one bid per file.

    Also filters out actions which are unrequired if repository is sparse.

    Returns mergeresult object same as manifestmerge().
    """
    # Avoid cycle.
    from . import sparse

    mresult = None
    if len(ancestors) == 1:  # default
        # Single ancestor: one manifestmerge() pass yields the final actions.
        mresult = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # mapping filename to bids (action method to list af actions)
        # {FILENAME1 : BID1, FILENAME2 : BID2}
        # BID is another dictionary which contains
        # mapping of following form:
        # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
        fbids = {}
        mresult = mergeresult()
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            # forcefulldiff=True: every ancestor must bid on the same full
            # file set so the auction compares like with like.
            mresult1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(mresult1.diverge) < len(diverge):
                diverge = mresult1.diverge
            if renamedelete is None or len(renamedelete) < len(
                mresult1.renamedelete
            ):
                renamedelete = mresult1.renamedelete

            # blindly update final mergeresult commitinfo with what we get
            # from mergeresult object for each ancestor
            # TODO: some commitinfo depends on what bid merge choose and hence
            # we will need to make commitinfo also depend on bid merge logic
            mresult._commitinfo.update(mresult1._commitinfo)

            # Record this ancestor's bid for each file, grouped by action.
            # `m` is a merge-action object; its textual form comes from
            # m.__bytes__() (actions are full-featured objects, not bytes).
            for f, a in mresult1.filemap(sort=True):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m.__bytes__()))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Call for bids
        # Pick the best bid for each file
        #
        # Precedence, as implemented by the if/continue ladder below:
        #   1. unanimous consensus on a single identical action
        #   2. ACTION_KEEP
        #   3. ACTION_KEEP_ABSENT
        #   4. ACTION_CHANGED_DELETED (when it conflicts with ACTION_KEEP_NEW)
        #   5. ACTION_KEEP_NEW
        #   6. ACTION_DELETED_CHANGED (when it conflicts with ACTION_GET)
        #   7. ACTION_GET (when all 'get' bids agree)
        #   8. fallback: first bid in the dict, with a warning
        repo.ui.note(
            _(b'\nauction for merging merge bids (%d ancestors)\n')
            % len(ancestors)
        )
        for f, bids in sorted(fbids.items()):
            if repo.ui.debugflag:
                repo.ui.debug(b" list of bids for %s:\n" % f)
                for m, l in sorted(bids.items()):
                    for _f, args, msg in l:
                        repo.ui.debug(b' %s -> %s\n' % (msg, m.__bytes__()))
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(
                        _(b" %s: consensus for %s\n") % (f, m.__bytes__())
                    )
                    mresult.addfile(f, *l[0])
                    continue
            # If keep is an option, just do it.
            if mergestatemod.ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
                continue
            # If keep absent is an option, just do that
            if mergestatemod.ACTION_KEEP_ABSENT in bids:
                repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0])
                continue
            # ACTION_KEEP_NEW and ACTION_CHANGED_DELETED are conflicting actions
            # as one say that file is new while other says that file was present
            # earlier too and has a change delete conflict
            # Let's fall back to conflicting ACTION_CHANGED_DELETED and let user
            # do the right thing
            if (
                mergestatemod.ACTION_CHANGED_DELETED in bids
                and mergestatemod.ACTION_KEEP_NEW in bids
            ):
                repo.ui.note(_(b" %s: picking 'changed/deleted' action\n") % f)
                mresult.addfile(
                    f, *bids[mergestatemod.ACTION_CHANGED_DELETED][0]
                )
                continue
            # If keep new is an option, let's just do that
            if mergestatemod.ACTION_KEEP_NEW in bids:
                repo.ui.note(_(b" %s: picking 'keep new' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_NEW][0])
                continue
            # ACTION_GET and ACTION_DELETE_CHANGED are conflicting actions as
            # one action states the file is newer/created on remote side and
            # other states that file is deleted locally and changed on remote
            # side. Let's fallback and rely on a conflicting action to let user
            # do the right thing
            if (
                mergestatemod.ACTION_DELETED_CHANGED in bids
                and mergestatemod.ACTION_GET in bids
            ):
                repo.ui.note(_(b" %s: picking 'delete/changed' action\n") % f)
                mresult.addfile(
                    f, *bids[mergestatemod.ACTION_DELETED_CHANGED][0]
                )
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if mergestatemod.ACTION_GET in bids:
                ga0 = bids[mergestatemod.ACTION_GET][0]
                if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    mresult.addfile(f, *ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m.__bytes__()))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n')
                % (f, m.__bytes__())
            )
            mresult.addfile(f, *l[0])
            continue  # (redundant: already the last statement of the loop)
        repo.ui.note(_(b'end of auction\n\n'))
        mresult.updatevalues(diverge, renamedelete)

    if wctx.rev() is None:
        # Working-directory merge: files removed on the other side that were
        # never committed locally should be forgotten rather than deleted.
        _forgetremoved(wctx, mctx, branchmerge, mresult)

    sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
    # NOTE: trivial-conflict resolution uses ancestors[0] even in the bid-merge
    # case (only the first ancestor's content is compared here).
    _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)

    return mresult
1307
1310
1308
1311
1309 def _getcwd():
1312 def _getcwd():
1310 try:
1313 try:
1311 return encoding.getcwd()
1314 return encoding.getcwd()
1312 except OSError as err:
1315 except OSError as err:
1313 if err.errno == errno.ENOENT:
1316 if err.errno == errno.ENOENT:
1314 return None
1317 return None
1315 raise
1318 raise
1316
1319
1317
1320
1318 def batchremove(repo, wctx, actions):
1321 def batchremove(repo, wctx, actions):
1319 """apply removes to the working directory
1322 """apply removes to the working directory
1320
1323
1321 yields tuples for progress updates
1324 yields tuples for progress updates
1322 """
1325 """
1323 verbose = repo.ui.verbose
1326 verbose = repo.ui.verbose
1324 cwd = _getcwd()
1327 cwd = _getcwd()
1325 i = 0
1328 i = 0
1326 for f, args, msg in actions:
1329 for f, args, msg in actions:
1327 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1330 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1328 if verbose:
1331 if verbose:
1329 repo.ui.note(_(b"removing %s\n") % f)
1332 repo.ui.note(_(b"removing %s\n") % f)
1330 wctx[f].audit()
1333 wctx[f].audit()
1331 try:
1334 try:
1332 wctx[f].remove(ignoremissing=True)
1335 wctx[f].remove(ignoremissing=True)
1333 except OSError as inst:
1336 except OSError as inst:
1334 repo.ui.warn(
1337 repo.ui.warn(
1335 _(b"update failed to remove %s: %s!\n")
1338 _(b"update failed to remove %s: %s!\n")
1336 % (f, stringutil.forcebytestr(inst.strerror))
1339 % (f, stringutil.forcebytestr(inst.strerror))
1337 )
1340 )
1338 if i == 100:
1341 if i == 100:
1339 yield i, f
1342 yield i, f
1340 i = 0
1343 i = 0
1341 i += 1
1344 i += 1
1342 if i > 0:
1345 if i > 0:
1343 yield i, f
1346 yield i, f
1344
1347
1345 if cwd and not _getcwd():
1348 if cwd and not _getcwd():
1346 # cwd was removed in the course of removing files; print a helpful
1349 # cwd was removed in the course of removing files; print a helpful
1347 # warning.
1350 # warning.
1348 repo.ui.warn(
1351 repo.ui.warn(
1349 _(
1352 _(
1350 b"current directory was removed\n"
1353 b"current directory was removed\n"
1351 b"(consider changing to repo root: %s)\n"
1354 b"(consider changing to repo root: %s)\n"
1352 )
1355 )
1353 % repo.root
1356 % repo.root
1354 )
1357 )
1355
1358
1356
1359
1357 def batchget(repo, mctx, wctx, wantfiledata, actions):
1360 def batchget(repo, mctx, wctx, wantfiledata, actions):
1358 """apply gets to the working directory
1361 """apply gets to the working directory
1359
1362
1360 mctx is the context to get from
1363 mctx is the context to get from
1361
1364
1362 Yields arbitrarily many (False, tuple) for progress updates, followed by
1365 Yields arbitrarily many (False, tuple) for progress updates, followed by
1363 exactly one (True, filedata). When wantfiledata is false, filedata is an
1366 exactly one (True, filedata). When wantfiledata is false, filedata is an
1364 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1367 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1365 mtime) of the file f written for each action.
1368 mtime) of the file f written for each action.
1366 """
1369 """
1367 filedata = {}
1370 filedata = {}
1368 verbose = repo.ui.verbose
1371 verbose = repo.ui.verbose
1369 fctx = mctx.filectx
1372 fctx = mctx.filectx
1370 ui = repo.ui
1373 ui = repo.ui
1371 i = 0
1374 i = 0
1372 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1375 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1373 for f, (flags, backup), msg in actions:
1376 for f, (flags, backup), msg in actions:
1374 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1377 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1375 if verbose:
1378 if verbose:
1376 repo.ui.note(_(b"getting %s\n") % f)
1379 repo.ui.note(_(b"getting %s\n") % f)
1377
1380
1378 if backup:
1381 if backup:
1379 # If a file or directory exists with the same name, back that
1382 # If a file or directory exists with the same name, back that
1380 # up. Otherwise, look to see if there is a file that conflicts
1383 # up. Otherwise, look to see if there is a file that conflicts
1381 # with a directory this file is in, and if so, back that up.
1384 # with a directory this file is in, and if so, back that up.
1382 conflicting = f
1385 conflicting = f
1383 if not repo.wvfs.lexists(f):
1386 if not repo.wvfs.lexists(f):
1384 for p in pathutil.finddirs(f):
1387 for p in pathutil.finddirs(f):
1385 if repo.wvfs.isfileorlink(p):
1388 if repo.wvfs.isfileorlink(p):
1386 conflicting = p
1389 conflicting = p
1387 break
1390 break
1388 if repo.wvfs.lexists(conflicting):
1391 if repo.wvfs.lexists(conflicting):
1389 orig = scmutil.backuppath(ui, repo, conflicting)
1392 orig = scmutil.backuppath(ui, repo, conflicting)
1390 util.rename(repo.wjoin(conflicting), orig)
1393 util.rename(repo.wjoin(conflicting), orig)
1391 wfctx = wctx[f]
1394 wfctx = wctx[f]
1392 wfctx.clearunknown()
1395 wfctx.clearunknown()
1393 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1396 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1394 size = wfctx.write(
1397 size = wfctx.write(
1395 fctx(f).data(),
1398 fctx(f).data(),
1396 flags,
1399 flags,
1397 backgroundclose=True,
1400 backgroundclose=True,
1398 atomictemp=atomictemp,
1401 atomictemp=atomictemp,
1399 )
1402 )
1400 if wantfiledata:
1403 if wantfiledata:
1401 # XXX note that there is a race window between the time we
1404 # XXX note that there is a race window between the time we
1402 # write the clean data into the file and we stats it. So another
1405 # write the clean data into the file and we stats it. So another
1403 # writing process meddling with the file content right after we
1406 # writing process meddling with the file content right after we
1404 # wrote it could cause bad stat data to be gathered.
1407 # wrote it could cause bad stat data to be gathered.
1405 #
1408 #
1406 # They are 2 data we gather here
1409 # They are 2 data we gather here
1407 # - the mode:
1410 # - the mode:
1408 # That we actually just wrote, we should not need to read
1411 # That we actually just wrote, we should not need to read
1409 # it from disk, (except not all mode might have survived
1412 # it from disk, (except not all mode might have survived
1410 # the disk round-trip, which is another issue: we should
1413 # the disk round-trip, which is another issue: we should
1411 # not depends on this)
1414 # not depends on this)
1412 # - the mtime,
1415 # - the mtime,
1413 # On system that support nanosecond precision, the mtime
1416 # On system that support nanosecond precision, the mtime
1414 # could be accurate enough to tell the two writes appart.
1417 # could be accurate enough to tell the two writes appart.
1415 # However gathering it in a racy way make the mtime we
1418 # However gathering it in a racy way make the mtime we
1416 # gather "unreliable".
1419 # gather "unreliable".
1417 #
1420 #
1418 # (note: we get the size from the data we write, which is sane)
1421 # (note: we get the size from the data we write, which is sane)
1419 #
1422 #
1420 # So in theory the data returned here are fully racy, but in
1423 # So in theory the data returned here are fully racy, but in
1421 # practice "it works mostly fine".
1424 # practice "it works mostly fine".
1422 #
1425 #
1423 # Do not be surprised if you end up reading this while looking
1426 # Do not be surprised if you end up reading this while looking
1424 # for the causes of some buggy status. Feel free to improve
1427 # for the causes of some buggy status. Feel free to improve
1425 # this in the future, but we cannot simply stop gathering
1428 # this in the future, but we cannot simply stop gathering
1426 # information. Otherwise `hg status` call made after a large `hg
1429 # information. Otherwise `hg status` call made after a large `hg
1427 # update` runs would have to redo a similar amount of work to
1430 # update` runs would have to redo a similar amount of work to
1428 # restore and compare all files content.
1431 # restore and compare all files content.
1429 s = wfctx.lstat()
1432 s = wfctx.lstat()
1430 mode = s.st_mode
1433 mode = s.st_mode
1431 mtime = timestamp.mtime_of(s)
1434 mtime = timestamp.mtime_of(s)
1432 # for dirstate.update_file's parentfiledata argument:
1435 # for dirstate.update_file's parentfiledata argument:
1433 filedata[f] = (mode, size, mtime)
1436 filedata[f] = (mode, size, mtime)
1434 if i == 100:
1437 if i == 100:
1435 yield False, (i, f)
1438 yield False, (i, f)
1436 i = 0
1439 i = 0
1437 i += 1
1440 i += 1
1438 if i > 0:
1441 if i > 0:
1439 yield False, (i, f)
1442 yield False, (i, f)
1440 yield True, filedata
1443 yield True, filedata
1441
1444
1442
1445
1443 def _prefetchfiles(repo, ctx, mresult):
1446 def _prefetchfiles(repo, ctx, mresult):
1444 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1447 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1445 of merge actions. ``ctx`` is the context being merged in."""
1448 of merge actions. ``ctx`` is the context being merged in."""
1446
1449
1447 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1450 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1448 # don't touch the context to be merged in. 'cd' is skipped, because
1451 # don't touch the context to be merged in. 'cd' is skipped, because
1449 # changed/deleted never resolves to something from the remote side.
1452 # changed/deleted never resolves to something from the remote side.
1450 files = mresult.files(
1453 files = mresult.files(
1451 [
1454 [
1452 mergestatemod.ACTION_GET,
1455 mergestatemod.ACTION_GET,
1453 mergestatemod.ACTION_DELETED_CHANGED,
1456 mergestatemod.ACTION_DELETED_CHANGED,
1454 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1457 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1455 mergestatemod.ACTION_MERGE,
1458 mergestatemod.ACTION_MERGE,
1456 ]
1459 ]
1457 )
1460 )
1458
1461
1459 prefetch = scmutil.prefetchfiles
1462 prefetch = scmutil.prefetchfiles
1460 matchfiles = scmutil.matchfiles
1463 matchfiles = scmutil.matchfiles
1461 prefetch(
1464 prefetch(
1462 repo,
1465 repo,
1463 [
1466 [
1464 (
1467 (
1465 ctx.rev(),
1468 ctx.rev(),
1466 matchfiles(repo, files),
1469 matchfiles(repo, files),
1467 )
1470 )
1468 ],
1471 ],
1469 )
1472 )
1470
1473
1471
1474
1472 @attr.s(frozen=True)
1475 @attr.s(frozen=True)
1473 class updateresult(object):
1476 class updateresult(object):
1474 updatedcount = attr.ib()
1477 updatedcount = attr.ib()
1475 mergedcount = attr.ib()
1478 mergedcount = attr.ib()
1476 removedcount = attr.ib()
1479 removedcount = attr.ib()
1477 unresolvedcount = attr.ib()
1480 unresolvedcount = attr.ib()
1478
1481
1479 def isempty(self):
1482 def isempty(self):
1480 return not (
1483 return not (
1481 self.updatedcount
1484 self.updatedcount
1482 or self.mergedcount
1485 or self.mergedcount
1483 or self.removedcount
1486 or self.removedcount
1484 or self.unresolvedcount
1487 or self.unresolvedcount
1485 )
1488 )
1486
1489
1487
1490
1488 def applyupdates(
1491 def applyupdates(
1489 repo,
1492 repo,
1490 mresult,
1493 mresult,
1491 wctx,
1494 wctx,
1492 mctx,
1495 mctx,
1493 overwrite,
1496 overwrite,
1494 wantfiledata,
1497 wantfiledata,
1495 labels=None,
1498 labels=None,
1496 ):
1499 ):
1497 """apply the merge action list to the working directory
1500 """apply the merge action list to the working directory
1498
1501
1499 mresult is a mergeresult object representing result of the merge
1502 mresult is a mergeresult object representing result of the merge
1500 wctx is the working copy context
1503 wctx is the working copy context
1501 mctx is the context to be merged into the working copy
1504 mctx is the context to be merged into the working copy
1502
1505
1503 Return a tuple of (counts, filedata), where counts is a tuple
1506 Return a tuple of (counts, filedata), where counts is a tuple
1504 (updated, merged, removed, unresolved) that describes how many
1507 (updated, merged, removed, unresolved) that describes how many
1505 files were affected by the update, and filedata is as described in
1508 files were affected by the update, and filedata is as described in
1506 batchget.
1509 batchget.
1507 """
1510 """
1508
1511
1509 _prefetchfiles(repo, mctx, mresult)
1512 _prefetchfiles(repo, mctx, mresult)
1510
1513
1511 updated, merged, removed = 0, 0, 0
1514 updated, merged, removed = 0, 0, 0
1512 ms = wctx.mergestate(clean=True)
1515 ms = wctx.mergestate(clean=True)
1513 ms.start(wctx.p1().node(), mctx.node(), labels)
1516 ms.start(wctx.p1().node(), mctx.node(), labels)
1514
1517
1515 for f, op in pycompat.iteritems(mresult.commitinfo):
1518 for f, op in pycompat.iteritems(mresult.commitinfo):
1516 # the other side of filenode was choosen while merging, store this in
1519 # the other side of filenode was choosen while merging, store this in
1517 # mergestate so that it can be reused on commit
1520 # mergestate so that it can be reused on commit
1518 ms.addcommitinfo(f, op)
1521 ms.addcommitinfo(f, op)
1519
1522
1520 numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
1523 numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
1521 progress = repo.ui.makeprogress(
1524 progress = repo.ui.makeprogress(
1522 _(b'updating'), unit=_(b'files'), total=numupdates
1525 _(b'updating'), unit=_(b'files'), total=numupdates
1523 )
1526 )
1524
1527
1525 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1528 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1526 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1529 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1527
1530
1528 # record path conflicts
1531 # record path conflicts
1529 for f, args, msg in mresult.getactions(
1532 for f, args, msg in mresult.getactions(
1530 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1533 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1531 ):
1534 ):
1532 f1, fo = args
1535 f1, fo = args
1533 s = repo.ui.status
1536 s = repo.ui.status
1534 s(
1537 s(
1535 _(
1538 _(
1536 b"%s: path conflict - a file or link has the same name as a "
1539 b"%s: path conflict - a file or link has the same name as a "
1537 b"directory\n"
1540 b"directory\n"
1538 )
1541 )
1539 % f
1542 % f
1540 )
1543 )
1541 if fo == b'l':
1544 if fo == b'l':
1542 s(_(b"the local file has been renamed to %s\n") % f1)
1545 s(_(b"the local file has been renamed to %s\n") % f1)
1543 else:
1546 else:
1544 s(_(b"the remote file has been renamed to %s\n") % f1)
1547 s(_(b"the remote file has been renamed to %s\n") % f1)
1545 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1548 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1546 ms.addpathconflict(f, f1, fo)
1549 ms.addpathconflict(f, f1, fo)
1547 progress.increment(item=f)
1550 progress.increment(item=f)
1548
1551
1549 # When merging in-memory, we can't support worker processes, so set the
1552 # When merging in-memory, we can't support worker processes, so set the
1550 # per-item cost at 0 in that case.
1553 # per-item cost at 0 in that case.
1551 cost = 0 if wctx.isinmemory() else 0.001
1554 cost = 0 if wctx.isinmemory() else 0.001
1552
1555
1553 # remove in parallel (must come before resolving path conflicts and getting)
1556 # remove in parallel (must come before resolving path conflicts and getting)
1554 prog = worker.worker(
1557 prog = worker.worker(
1555 repo.ui,
1558 repo.ui,
1556 cost,
1559 cost,
1557 batchremove,
1560 batchremove,
1558 (repo, wctx),
1561 (repo, wctx),
1559 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1562 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1560 )
1563 )
1561 for i, item in prog:
1564 for i, item in prog:
1562 progress.increment(step=i, item=item)
1565 progress.increment(step=i, item=item)
1563 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1566 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1564
1567
1565 # resolve path conflicts (must come before getting)
1568 # resolve path conflicts (must come before getting)
1566 for f, args, msg in mresult.getactions(
1569 for f, args, msg in mresult.getactions(
1567 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1570 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1568 ):
1571 ):
1569 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1572 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1570 (f0, origf0) = args
1573 (f0, origf0) = args
1571 if wctx[f0].lexists():
1574 if wctx[f0].lexists():
1572 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1575 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1573 wctx[f].audit()
1576 wctx[f].audit()
1574 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1577 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1575 wctx[f0].remove()
1578 wctx[f0].remove()
1576 progress.increment(item=f)
1579 progress.increment(item=f)
1577
1580
1578 # get in parallel.
1581 # get in parallel.
1579 threadsafe = repo.ui.configbool(
1582 threadsafe = repo.ui.configbool(
1580 b'experimental', b'worker.wdir-get-thread-safe'
1583 b'experimental', b'worker.wdir-get-thread-safe'
1581 )
1584 )
1582 prog = worker.worker(
1585 prog = worker.worker(
1583 repo.ui,
1586 repo.ui,
1584 cost,
1587 cost,
1585 batchget,
1588 batchget,
1586 (repo, mctx, wctx, wantfiledata),
1589 (repo, mctx, wctx, wantfiledata),
1587 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1590 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1588 threadsafe=threadsafe,
1591 threadsafe=threadsafe,
1589 hasretval=True,
1592 hasretval=True,
1590 )
1593 )
1591 getfiledata = {}
1594 getfiledata = {}
1592 for final, res in prog:
1595 for final, res in prog:
1593 if final:
1596 if final:
1594 getfiledata = res
1597 getfiledata = res
1595 else:
1598 else:
1596 i, item = res
1599 i, item = res
1597 progress.increment(step=i, item=item)
1600 progress.increment(step=i, item=item)
1598
1601
1599 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1602 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1600 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1603 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1601
1604
1602 # forget (manifest only, just log it) (must come first)
1605 # forget (manifest only, just log it) (must come first)
1603 for f, args, msg in mresult.getactions(
1606 for f, args, msg in mresult.getactions(
1604 (mergestatemod.ACTION_FORGET,), sort=True
1607 (mergestatemod.ACTION_FORGET,), sort=True
1605 ):
1608 ):
1606 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1609 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1607 progress.increment(item=f)
1610 progress.increment(item=f)
1608
1611
1609 # re-add (manifest only, just log it)
1612 # re-add (manifest only, just log it)
1610 for f, args, msg in mresult.getactions(
1613 for f, args, msg in mresult.getactions(
1611 (mergestatemod.ACTION_ADD,), sort=True
1614 (mergestatemod.ACTION_ADD,), sort=True
1612 ):
1615 ):
1613 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1616 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1614 progress.increment(item=f)
1617 progress.increment(item=f)
1615
1618
1616 # re-add/mark as modified (manifest only, just log it)
1619 # re-add/mark as modified (manifest only, just log it)
1617 for f, args, msg in mresult.getactions(
1620 for f, args, msg in mresult.getactions(
1618 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1621 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1619 ):
1622 ):
1620 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1623 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1621 progress.increment(item=f)
1624 progress.increment(item=f)
1622
1625
1623 # keep (noop, just log it)
1626 # keep (noop, just log it)
1624 for a in mergestatemod.NO_OP_ACTIONS:
1627 for a in mergestatemod.NO_OP_ACTIONS:
1625 for f, args, msg in mresult.getactions((a,), sort=True):
1628 for f, args, msg in mresult.getactions((a,), sort=True):
1626 repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a))
1629 repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a.__bytes__()))
1627 # no progress
1630 # no progress
1628
1631
1629 # directory rename, move local
1632 # directory rename, move local
1630 for f, args, msg in mresult.getactions(
1633 for f, args, msg in mresult.getactions(
1631 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1634 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1632 ):
1635 ):
1633 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1636 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1634 progress.increment(item=f)
1637 progress.increment(item=f)
1635 f0, flags = args
1638 f0, flags = args
1636 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1639 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1637 wctx[f].audit()
1640 wctx[f].audit()
1638 wctx[f].write(wctx.filectx(f0).data(), flags)
1641 wctx[f].write(wctx.filectx(f0).data(), flags)
1639 wctx[f0].remove()
1642 wctx[f0].remove()
1640
1643
1641 # local directory rename, get
1644 # local directory rename, get
1642 for f, args, msg in mresult.getactions(
1645 for f, args, msg in mresult.getactions(
1643 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1646 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1644 ):
1647 ):
1645 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1648 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1646 progress.increment(item=f)
1649 progress.increment(item=f)
1647 f0, flags = args
1650 f0, flags = args
1648 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1651 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1649 wctx[f].write(mctx.filectx(f0).data(), flags)
1652 wctx[f].write(mctx.filectx(f0).data(), flags)
1650
1653
1651 # exec
1654 # exec
1652 for f, args, msg in mresult.getactions(
1655 for f, args, msg in mresult.getactions(
1653 (mergestatemod.ACTION_EXEC,), sort=True
1656 (mergestatemod.ACTION_EXEC,), sort=True
1654 ):
1657 ):
1655 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1658 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1656 progress.increment(item=f)
1659 progress.increment(item=f)
1657 (flags,) = args
1660 (flags,) = args
1658 wctx[f].audit()
1661 wctx[f].audit()
1659 wctx[f].setflags(b'l' in flags, b'x' in flags)
1662 wctx[f].setflags(b'l' in flags, b'x' in flags)
1660
1663
1661 moves = []
1664 moves = []
1662
1665
1663 # 'cd' and 'dc' actions are treated like other merge conflicts
1666 # 'cd' and 'dc' actions are treated like other merge conflicts
1664 mergeactions = list(
1667 mergeactions = list(
1665 mresult.getactions(
1668 mresult.getactions(
1666 [
1669 [
1667 mergestatemod.ACTION_CHANGED_DELETED,
1670 mergestatemod.ACTION_CHANGED_DELETED,
1668 mergestatemod.ACTION_DELETED_CHANGED,
1671 mergestatemod.ACTION_DELETED_CHANGED,
1669 mergestatemod.ACTION_MERGE,
1672 mergestatemod.ACTION_MERGE,
1670 ],
1673 ],
1671 sort=True,
1674 sort=True,
1672 )
1675 )
1673 )
1676 )
1674 for f, args, msg in mergeactions:
1677 for f, args, msg in mergeactions:
1675 f1, f2, fa, move, anc = args
1678 f1, f2, fa, move, anc = args
1676 if f == b'.hgsubstate': # merged internally
1679 if f == b'.hgsubstate': # merged internally
1677 continue
1680 continue
1678 if f1 is None:
1681 if f1 is None:
1679 fcl = filemerge.absentfilectx(wctx, fa)
1682 fcl = filemerge.absentfilectx(wctx, fa)
1680 else:
1683 else:
1681 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1684 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1682 fcl = wctx[f1]
1685 fcl = wctx[f1]
1683 if f2 is None:
1686 if f2 is None:
1684 fco = filemerge.absentfilectx(mctx, fa)
1687 fco = filemerge.absentfilectx(mctx, fa)
1685 else:
1688 else:
1686 fco = mctx[f2]
1689 fco = mctx[f2]
1687 actx = repo[anc]
1690 actx = repo[anc]
1688 if fa in actx:
1691 if fa in actx:
1689 fca = actx[fa]
1692 fca = actx[fa]
1690 else:
1693 else:
1691 # TODO: move to absentfilectx
1694 # TODO: move to absentfilectx
1692 fca = repo.filectx(f1, fileid=nullrev)
1695 fca = repo.filectx(f1, fileid=nullrev)
1693 ms.add(fcl, fco, fca, f)
1696 ms.add(fcl, fco, fca, f)
1694 if f1 != f and move:
1697 if f1 != f and move:
1695 moves.append(f1)
1698 moves.append(f1)
1696
1699
1697 # remove renamed files after safely stored
1700 # remove renamed files after safely stored
1698 for f in moves:
1701 for f in moves:
1699 if wctx[f].lexists():
1702 if wctx[f].lexists():
1700 repo.ui.debug(b"removing %s\n" % f)
1703 repo.ui.debug(b"removing %s\n" % f)
1701 wctx[f].audit()
1704 wctx[f].audit()
1702 wctx[f].remove()
1705 wctx[f].remove()
1703
1706
1704 # these actions updates the file
1707 # these actions updates the file
1705 updated = mresult.len(
1708 updated = mresult.len(
1706 (
1709 (
1707 mergestatemod.ACTION_GET,
1710 mergestatemod.ACTION_GET,
1708 mergestatemod.ACTION_EXEC,
1711 mergestatemod.ACTION_EXEC,
1709 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1712 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1710 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1713 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1711 )
1714 )
1712 )
1715 )
1713
1716
1714 try:
1717 try:
1715 for f, args, msg in mergeactions:
1718 for f, args, msg in mergeactions:
1716 repo.ui.debug(b" %s: %s -> m\n" % (f, msg))
1719 repo.ui.debug(b" %s: %s -> m\n" % (f, msg))
1717 ms.addcommitinfo(f, {b'merged': b'yes'})
1720 ms.addcommitinfo(f, {b'merged': b'yes'})
1718 progress.increment(item=f)
1721 progress.increment(item=f)
1719 if f == b'.hgsubstate': # subrepo states need updating
1722 if f == b'.hgsubstate': # subrepo states need updating
1720 subrepoutil.submerge(
1723 subrepoutil.submerge(
1721 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1724 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1722 )
1725 )
1723 continue
1726 continue
1724 wctx[f].audit()
1727 wctx[f].audit()
1725 ms.resolve(f, wctx)
1728 ms.resolve(f, wctx)
1726
1729
1727 except error.InterventionRequired:
1730 except error.InterventionRequired:
1728 # If the user has merge.on-failure=halt, catch the error and close the
1731 # If the user has merge.on-failure=halt, catch the error and close the
1729 # merge state "properly".
1732 # merge state "properly".
1730 pass
1733 pass
1731 finally:
1734 finally:
1732 ms.commit()
1735 ms.commit()
1733
1736
1734 unresolved = ms.unresolvedcount()
1737 unresolved = ms.unresolvedcount()
1735
1738
1736 msupdated, msmerged, msremoved = ms.counts()
1739 msupdated, msmerged, msremoved = ms.counts()
1737 updated += msupdated
1740 updated += msupdated
1738 merged += msmerged
1741 merged += msmerged
1739 removed += msremoved
1742 removed += msremoved
1740
1743
1741 extraactions = ms.actions()
1744 extraactions = ms.actions()
1742
1745
1743 progress.complete()
1746 progress.complete()
1744 return (
1747 return (
1745 updateresult(updated, merged, removed, unresolved),
1748 updateresult(updated, merged, removed, unresolved),
1746 getfiledata,
1749 getfiledata,
1747 extraactions,
1750 extraactions,
1748 )
1751 )
1749
1752
1750
1753
1751 def _advertisefsmonitor(repo, num_gets, p1node):
1754 def _advertisefsmonitor(repo, num_gets, p1node):
1752 # Advertise fsmonitor when its presence could be useful.
1755 # Advertise fsmonitor when its presence could be useful.
1753 #
1756 #
1754 # We only advertise when performing an update from an empty working
1757 # We only advertise when performing an update from an empty working
1755 # directory. This typically only occurs during initial clone.
1758 # directory. This typically only occurs during initial clone.
1756 #
1759 #
1757 # We give users a mechanism to disable the warning in case it is
1760 # We give users a mechanism to disable the warning in case it is
1758 # annoying.
1761 # annoying.
1759 #
1762 #
1760 # We only allow on Linux and MacOS because that's where fsmonitor is
1763 # We only allow on Linux and MacOS because that's where fsmonitor is
1761 # considered stable.
1764 # considered stable.
1762 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1765 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1763 fsmonitorthreshold = repo.ui.configint(
1766 fsmonitorthreshold = repo.ui.configint(
1764 b'fsmonitor', b'warn_update_file_count'
1767 b'fsmonitor', b'warn_update_file_count'
1765 )
1768 )
1766 # avoid cycle dirstate -> sparse -> merge -> dirstate
1769 # avoid cycle dirstate -> sparse -> merge -> dirstate
1767 from . import dirstate
1770 from . import dirstate
1768
1771
1769 if dirstate.rustmod is not None:
1772 if dirstate.rustmod is not None:
1770 # When using rust status, fsmonitor becomes necessary at higher sizes
1773 # When using rust status, fsmonitor becomes necessary at higher sizes
1771 fsmonitorthreshold = repo.ui.configint(
1774 fsmonitorthreshold = repo.ui.configint(
1772 b'fsmonitor',
1775 b'fsmonitor',
1773 b'warn_update_file_count_rust',
1776 b'warn_update_file_count_rust',
1774 )
1777 )
1775
1778
1776 try:
1779 try:
1777 # avoid cycle: extensions -> cmdutil -> merge
1780 # avoid cycle: extensions -> cmdutil -> merge
1778 from . import extensions
1781 from . import extensions
1779
1782
1780 extensions.find(b'fsmonitor')
1783 extensions.find(b'fsmonitor')
1781 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1784 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1782 # We intentionally don't look at whether fsmonitor has disabled
1785 # We intentionally don't look at whether fsmonitor has disabled
1783 # itself because a) fsmonitor may have already printed a warning
1786 # itself because a) fsmonitor may have already printed a warning
1784 # b) we only care about the config state here.
1787 # b) we only care about the config state here.
1785 except KeyError:
1788 except KeyError:
1786 fsmonitorenabled = False
1789 fsmonitorenabled = False
1787
1790
1788 if (
1791 if (
1789 fsmonitorwarning
1792 fsmonitorwarning
1790 and not fsmonitorenabled
1793 and not fsmonitorenabled
1791 and p1node == repo.nullid
1794 and p1node == repo.nullid
1792 and num_gets >= fsmonitorthreshold
1795 and num_gets >= fsmonitorthreshold
1793 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1796 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1794 ):
1797 ):
1795 repo.ui.warn(
1798 repo.ui.warn(
1796 _(
1799 _(
1797 b'(warning: large working directory being used without '
1800 b'(warning: large working directory being used without '
1798 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1801 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1799 b'see "hg help -e fsmonitor")\n'
1802 b'see "hg help -e fsmonitor")\n'
1800 )
1803 )
1801 )
1804 )
1802
1805
1803
1806
# Policies for what to do when an update would touch files with uncommitted
# changes; these are the accepted values for the ``updatecheck`` argument of
# ``_update`` (see the decision table in its docstring).
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
1808
1811
1809
1812
def _update(
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatedirstate=True,
    updatecheck=None,
    wc=None,
):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c -C -n -m dirty rev linear |  result
     y  y  *  *    *   *    *    |    (1)
     y  *  y  *    *   *    *    |    (1)
     y  *  *  y    *   *    *    |    (1)
     *  y  y  *    *   *    *    |    (1)
     *  y  *  y    *   *    *    |    (1)
     *  *  y  y    *   *    *    |    (1)
     *  *  *  *    *   n    n    |     x
     *  *  *  *    n   *    *    |    ok
     n  n  n  n    y   *    y    |   merge
     n  n  n  n    y   y    n    |    (2)
     n  n  n  y    y   *    *    |   merge
     n  n  y  n    y   *    *    |   merge if no conflict
     n  y  n  n    y   *    *    |   discard
     y  n  n  n    y   *    *    |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like objects. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        okay = (UPDATECHECK_NONE, UPDATECHECK_LINEAR, UPDATECHECK_NO_CONFLICT)
        if updatecheck not in okay:
            msg = r'Invalid updatecheck %r (can accept %r)'
            msg %= (updatecheck, okay)
            raise ValueError(msg)
    # In-memory merges do not touch the on-disk working copy, so no wlock is
    # needed for them.
    if wc is not None and wc.isinmemory():
        maybe_wlock = util.nullcontextmanager()
    else:
        maybe_wlock = repo.wlock()
    with maybe_wlock:
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.StateError(_(b"outstanding uncommitted merge"))
            ms = wc.mergestate()
            if ms.unresolvedcount():
                msg = _(b"outstanding merge conflicts")
                hint = _(b"use 'hg resolve' to resolve")
                raise error.StateError(msg, hint=hint)
        if branchmerge:
            m_a = _(b"merging with a working directory ancestor has no effect")
            if pas == [p2]:
                raise error.Abort(m_a)
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    msg = _(b"nothing to merge")
                    hint = _(b"use 'hg update' or check 'hg heads'")
                    raise error.Abort(msg, hint=hint)
            if not force and (wc.files() or wc.deleted()):
                msg = _(b"uncommitted changes")
                hint = _(b"use 'hg status' to list changes")
                raise error.StateError(msg, hint=hint)
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

            if updatecheck == UPDATECHECK_LINEAR and pas not in (
                [p1],
                [p2],
            ):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass  # allow updating to successors
                    else:
                        msg = _(b"uncommitted changes")
                        hint = _(b"commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        mresult = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            if mresult.hasconflicts():
                msg = _(b"conflicting changes")
                hint = _(b"commit or update --clean to discard changes")
                raise error.StateError(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        vals = mresult.getfile(b'.hgsubstate')
        if vals:
            f = b'.hgsubstate'
            m, args, msg = vals
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == mergestatemod.ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_REMOVE,
                        None,
                        b'prompt delete',
                    )
                elif f in p1:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_ADD,
                        None,
                        b'prompt keep',
                    )
            elif m == mergestatemod.ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    mresult.removefile(f)

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), mresult)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        _advertisefsmonitor(
            repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
        )

        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata, extraactions = applyupdates(
            repo,
            mresult,
            wc,
            p2,
            overwrite,
            wantfiledata,
            labels=labels,
        )

        if updatedirstate:
            if extraactions:
                for k, acts in pycompat.iteritems(extraactions):
                    for a in acts:
                        mresult.addfile(a[0], k, *a[1:])
                    if k == mergestatemod.ACTION_GET and wantfiledata:
                        # no filedata until mergestate is updated to provide it
                        for a in acts:
                            getfiledata[a[0]] = None

            assert len(getfiledata) == (
                mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
            )
            with repo.dirstate.parentchange():
                ### Filter Filedata
                #
                # We gathered "cache" information for the clean file while
                # updating them: mtime, size and mode.
                #
                # At the time this comment is written, there are various issues
                # with how we gather the `mode` and `mtime` information (see
                # the comment in `batchget`).
                #
                # We are going to smooth one of these issues here: mtime
                # ambiguity.
                #
                # i.e. even if the mtime gathered during `batchget` was
                # correct[1] a change happening right after it could change the
                # content while keeping the same mtime[2].
                #
                # When we reach the current code, the "on disk" part of the
                # update operation is finished. We still assume that no other
                # process raced that "on disk" part, but we want to at least
                # prevent later file change to alter the content of the file
                # right after the update operation. So quickly that the same
                # mtime is recorded for the operation.
                # To prevent such ambiguity to happen, we will only keep the
                # "file data" for files with mtime that are strictly in the
                # past, i.e. whose mtime is strictly lower than the current
                # time.
                #
                # This protects us from race conditions from operations that
                # could run right after this one, especially other Mercurial
                # operations that could be waiting for the wlock to touch files
                # content and the dirstate.
                #
                # In an ideal world, we could only get reliable information in
                # `getfiledata` (from `getbatch`), however the current approach
                # has been a successful compromise for many years.
                #
                # At the time this comment is written, not using any "cache"
                # file data at all here would not be viable. As it would result
                # in a very large amount of work (equivalent to the previous
                # `hg update` during the next status after an update).
                #
                # [1] the current code cannot guarantee that the `mtime` and
                # `mode` are correct, but the result is "okay in practice".
                # (see the comment in `batchget`).
                #
                # [2] using nano-second precision can greatly help here because
                # it makes the "different write with same mtime" issue
                # virtually vanish. However, dirstate v1 cannot store such
                # precision and a bunch of python-runtime, operating-system and
                # filesystem does not provide us with such precision, so we
                # have to operate as if it wasn't available.
                if getfiledata:
                    ambiguous_mtime = {}
                    now = timestamp.get_fs_now(repo.vfs)
                    if now is None:
                        # we can't write to the FS, so we won't actually update
                        # the dirstate content anyway, no need to put cache
                        # information.
                        getfiledata = None
                    else:
                        now_sec = now[0]
                        for f, m in pycompat.iteritems(getfiledata):
                            if m is not None and m[2][0] >= now_sec:
                                ambiguous_mtime[f] = (m[0], m[1], None)
                        for f, m in pycompat.iteritems(ambiguous_mtime):
                            getfiledata[f] = m

                repo.setparents(fp1, fp2)
                mergestatemod.recordupdates(
                    repo, mresult.actionsdict, branchmerge, getfiledata
                )
                # update completed, clear state
                util.unlink(repo.vfs.join(b'updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary includes
        # (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

        if updatedirstate:
            repo.hook(
                b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
            )
        return stats
2224
2227
2225
2228
def merge(ctx, labels=None, force=False, wc=None):
    """Merge the changeset *ctx* (another topological branch) into the
    working copy.

    force = whether the merge was run with 'merge --force' (deprecated)
    """
    repo = ctx.repo()
    return _update(
        repo,
        ctx.rev(),
        branchmerge=True,
        force=force,
        mergeforce=force,
        labels=labels,
        wc=wc,
    )
2241
2244
2242
2245
def update(ctx, updatecheck=None, wc=None):
    """Do a regular update to the given commit, aborting if there are conflicts.

    'updatecheck' controls the behavior when conflicts would arise; see the
    UPDATECHECK_* constants.

    Note: This is a new, higher-level update() than the one that used to exist
    in this module. That function is now called _update(). You can hopefully
    replace your callers to use this new update(), or clean_update(), merge(),
    revert_to(), or graft().
    """
    labels = [b'working copy', b'destination', b'working copy parent']
    return _update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=False,
        labels=labels,
        updatecheck=updatecheck,
        wc=wc,
    )
2263
2266
2264
2267
def clean_update(ctx, wc=None):
    """Do a clean update to the given commit.

    Updates to the commit while discarding any changes present in the
    working copy.
    """
    repo = ctx.repo()
    rev = ctx.rev()
    return _update(repo, rev, branchmerge=False, force=True, wc=wc)
2272
2275
2273
2276
def revert_to(ctx, matcher=None, wc=None):
    """Revert the working copy to the given commit.

    The working copy keeps its current parent(s); only its content is made
    identical to the given commit (the dirstate is deliberately left alone).
    """
    return _update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=True,
        matcher=matcher,
        updatedirstate=False,
        wc=wc,
    )
2290
2293
2291
2294
2292 def graft(
2295 def graft(
2293 repo,
2296 repo,
2294 ctx,
2297 ctx,
2295 base=None,
2298 base=None,
2296 labels=None,
2299 labels=None,
2297 keepparent=False,
2300 keepparent=False,
2298 keepconflictparent=False,
2301 keepconflictparent=False,
2299 wctx=None,
2302 wctx=None,
2300 ):
2303 ):
2301 """Do a graft-like merge.
2304 """Do a graft-like merge.
2302
2305
2303 This is a merge where the merge ancestor is chosen such that one
2306 This is a merge where the merge ancestor is chosen such that one
2304 or more changesets are grafted onto the current changeset. In
2307 or more changesets are grafted onto the current changeset. In
2305 addition to the merge, this fixes up the dirstate to include only
2308 addition to the merge, this fixes up the dirstate to include only
2306 a single parent (if keepparent is False) and tries to duplicate any
2309 a single parent (if keepparent is False) and tries to duplicate any
2307 renames/copies appropriately.
2310 renames/copies appropriately.
2308
2311
2309 ctx - changeset to rebase
2312 ctx - changeset to rebase
2310 base - merge base, or ctx.p1() if not specified
2313 base - merge base, or ctx.p1() if not specified
2311 labels - merge labels eg ['local', 'graft']
2314 labels - merge labels eg ['local', 'graft']
2312 keepparent - keep second parent if any
2315 keepparent - keep second parent if any
2313 keepconflictparent - if unresolved, keep parent used for the merge
2316 keepconflictparent - if unresolved, keep parent used for the merge
2314
2317
2315 """
2318 """
2316 # If we're grafting a descendant onto an ancestor, be sure to pass
2319 # If we're grafting a descendant onto an ancestor, be sure to pass
2317 # mergeancestor=True to update. This does two things: 1) allows the merge if
2320 # mergeancestor=True to update. This does two things: 1) allows the merge if
2318 # the destination is the same as the parent of the ctx (so we can use graft
2321 # the destination is the same as the parent of the ctx (so we can use graft
2319 # to copy commits), and 2) informs update that the incoming changes are
2322 # to copy commits), and 2) informs update that the incoming changes are
2320 # newer than the destination so it doesn't prompt about "remote changed foo
2323 # newer than the destination so it doesn't prompt about "remote changed foo
2321 # which local deleted".
2324 # which local deleted".
2322 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2325 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2323 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2326 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2324 wctx = wctx or repo[None]
2327 wctx = wctx or repo[None]
2325 pctx = wctx.p1()
2328 pctx = wctx.p1()
2326 base = base or ctx.p1()
2329 base = base or ctx.p1()
2327 mergeancestor = (
2330 mergeancestor = (
2328 repo.changelog.isancestor(pctx.node(), ctx.node())
2331 repo.changelog.isancestor(pctx.node(), ctx.node())
2329 or pctx.rev() == base.rev()
2332 or pctx.rev() == base.rev()
2330 )
2333 )
2331
2334
2332 stats = _update(
2335 stats = _update(
2333 repo,
2336 repo,
2334 ctx.node(),
2337 ctx.node(),
2335 True,
2338 True,
2336 True,
2339 True,
2337 base.node(),
2340 base.node(),
2338 mergeancestor=mergeancestor,
2341 mergeancestor=mergeancestor,
2339 labels=labels,
2342 labels=labels,
2340 wc=wctx,
2343 wc=wctx,
2341 )
2344 )
2342
2345
2343 if keepconflictparent and stats.unresolvedcount:
2346 if keepconflictparent and stats.unresolvedcount:
2344 pother = ctx.node()
2347 pother = ctx.node()
2345 else:
2348 else:
2346 pother = repo.nullid
2349 pother = repo.nullid
2347 parents = ctx.parents()
2350 parents = ctx.parents()
2348 if keepparent and len(parents) == 2 and base in parents:
2351 if keepparent and len(parents) == 2 and base in parents:
2349 parents.remove(base)
2352 parents.remove(base)
2350 pother = parents[0].node()
2353 pother = parents[0].node()
2351 # Never set both parents equal to each other
2354 # Never set both parents equal to each other
2352 if pother == pctx.node():
2355 if pother == pctx.node():
2353 pother = repo.nullid
2356 pother = repo.nullid
2354
2357
2355 if wctx.isinmemory():
2358 if wctx.isinmemory():
2356 wctx.setparents(pctx.node(), pother)
2359 wctx.setparents(pctx.node(), pother)
2357 # fix up dirstate for copies and renames
2360 # fix up dirstate for copies and renames
2358 copies.graftcopies(wctx, ctx, base)
2361 copies.graftcopies(wctx, ctx, base)
2359 else:
2362 else:
2360 with repo.dirstate.parentchange():
2363 with repo.dirstate.parentchange():
2361 repo.setparents(pctx.node(), pother)
2364 repo.setparents(pctx.node(), pother)
2362 repo.dirstate.write(repo.currenttransaction())
2365 repo.dirstate.write(repo.currenttransaction())
2363 # fix up dirstate for copies and renames
2366 # fix up dirstate for copies and renames
2364 copies.graftcopies(wctx, ctx, base)
2367 copies.graftcopies(wctx, ctx, base)
2365 return stats
2368 return stats
2366
2369
2367
2370
2368 def back_out(ctx, parent=None, wc=None):
2371 def back_out(ctx, parent=None, wc=None):
2369 if parent is None:
2372 if parent is None:
2370 if ctx.p2() is not None:
2373 if ctx.p2() is not None:
2371 msg = b"must specify parent of merge commit to back out"
2374 msg = b"must specify parent of merge commit to back out"
2372 raise error.ProgrammingError(msg)
2375 raise error.ProgrammingError(msg)
2373 parent = ctx.p1()
2376 parent = ctx.p1()
2374 return _update(
2377 return _update(
2375 ctx.repo(),
2378 ctx.repo(),
2376 parent,
2379 parent,
2377 branchmerge=True,
2380 branchmerge=True,
2378 force=True,
2381 force=True,
2379 ancestor=ctx.node(),
2382 ancestor=ctx.node(),
2380 mergeancestor=False,
2383 mergeancestor=False,
2381 )
2384 )
2382
2385
2383
2386
2384 def purge(
2387 def purge(
2385 repo,
2388 repo,
2386 matcher,
2389 matcher,
2387 unknown=True,
2390 unknown=True,
2388 ignored=False,
2391 ignored=False,
2389 removeemptydirs=True,
2392 removeemptydirs=True,
2390 removefiles=True,
2393 removefiles=True,
2391 abortonerror=False,
2394 abortonerror=False,
2392 noop=False,
2395 noop=False,
2393 confirm=False,
2396 confirm=False,
2394 ):
2397 ):
2395 """Purge the working directory of untracked files.
2398 """Purge the working directory of untracked files.
2396
2399
2397 ``matcher`` is a matcher configured to scan the working directory -
2400 ``matcher`` is a matcher configured to scan the working directory -
2398 potentially a subset.
2401 potentially a subset.
2399
2402
2400 ``unknown`` controls whether unknown files should be purged.
2403 ``unknown`` controls whether unknown files should be purged.
2401
2404
2402 ``ignored`` controls whether ignored files should be purged.
2405 ``ignored`` controls whether ignored files should be purged.
2403
2406
2404 ``removeemptydirs`` controls whether empty directories should be removed.
2407 ``removeemptydirs`` controls whether empty directories should be removed.
2405
2408
2406 ``removefiles`` controls whether files are removed.
2409 ``removefiles`` controls whether files are removed.
2407
2410
2408 ``abortonerror`` causes an exception to be raised if an error occurs
2411 ``abortonerror`` causes an exception to be raised if an error occurs
2409 deleting a file or directory.
2412 deleting a file or directory.
2410
2413
2411 ``noop`` controls whether to actually remove files. If not defined, actions
2414 ``noop`` controls whether to actually remove files. If not defined, actions
2412 will be taken.
2415 will be taken.
2413
2416
2414 ``confirm`` ask confirmation before actually removing anything.
2417 ``confirm`` ask confirmation before actually removing anything.
2415
2418
2416 Returns an iterable of relative paths in the working directory that were
2419 Returns an iterable of relative paths in the working directory that were
2417 or would be removed.
2420 or would be removed.
2418 """
2421 """
2419
2422
2420 def remove(removefn, path):
2423 def remove(removefn, path):
2421 try:
2424 try:
2422 removefn(path)
2425 removefn(path)
2423 except OSError:
2426 except OSError:
2424 m = _(b'%s cannot be removed') % path
2427 m = _(b'%s cannot be removed') % path
2425 if abortonerror:
2428 if abortonerror:
2426 raise error.Abort(m)
2429 raise error.Abort(m)
2427 else:
2430 else:
2428 repo.ui.warn(_(b'warning: %s\n') % m)
2431 repo.ui.warn(_(b'warning: %s\n') % m)
2429
2432
2430 # There's no API to copy a matcher. So mutate the passed matcher and
2433 # There's no API to copy a matcher. So mutate the passed matcher and
2431 # restore it when we're done.
2434 # restore it when we're done.
2432 oldtraversedir = matcher.traversedir
2435 oldtraversedir = matcher.traversedir
2433
2436
2434 res = []
2437 res = []
2435
2438
2436 try:
2439 try:
2437 if removeemptydirs:
2440 if removeemptydirs:
2438 directories = []
2441 directories = []
2439 matcher.traversedir = directories.append
2442 matcher.traversedir = directories.append
2440
2443
2441 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2444 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2442
2445
2443 if confirm:
2446 if confirm:
2444 nb_ignored = len(status.ignored)
2447 nb_ignored = len(status.ignored)
2445 nb_unknown = len(status.unknown)
2448 nb_unknown = len(status.unknown)
2446 if nb_unknown and nb_ignored:
2449 if nb_unknown and nb_ignored:
2447 msg = _(b"permanently delete %d unknown and %d ignored files?")
2450 msg = _(b"permanently delete %d unknown and %d ignored files?")
2448 msg %= (nb_unknown, nb_ignored)
2451 msg %= (nb_unknown, nb_ignored)
2449 elif nb_unknown:
2452 elif nb_unknown:
2450 msg = _(b"permanently delete %d unknown files?")
2453 msg = _(b"permanently delete %d unknown files?")
2451 msg %= nb_unknown
2454 msg %= nb_unknown
2452 elif nb_ignored:
2455 elif nb_ignored:
2453 msg = _(b"permanently delete %d ignored files?")
2456 msg = _(b"permanently delete %d ignored files?")
2454 msg %= nb_ignored
2457 msg %= nb_ignored
2455 elif removeemptydirs:
2458 elif removeemptydirs:
2456 dir_count = 0
2459 dir_count = 0
2457 for f in directories:
2460 for f in directories:
2458 if matcher(f) and not repo.wvfs.listdir(f):
2461 if matcher(f) and not repo.wvfs.listdir(f):
2459 dir_count += 1
2462 dir_count += 1
2460 if dir_count:
2463 if dir_count:
2461 msg = _(
2464 msg = _(
2462 b"permanently delete at least %d empty directories?"
2465 b"permanently delete at least %d empty directories?"
2463 )
2466 )
2464 msg %= dir_count
2467 msg %= dir_count
2465 else:
2468 else:
2466 # XXX we might be missing directory there
2469 # XXX we might be missing directory there
2467 return res
2470 return res
2468 msg += b" (yN)$$ &Yes $$ &No"
2471 msg += b" (yN)$$ &Yes $$ &No"
2469 if repo.ui.promptchoice(msg, default=1) == 1:
2472 if repo.ui.promptchoice(msg, default=1) == 1:
2470 raise error.CanceledError(_(b'removal cancelled'))
2473 raise error.CanceledError(_(b'removal cancelled'))
2471
2474
2472 if removefiles:
2475 if removefiles:
2473 for f in sorted(status.unknown + status.ignored):
2476 for f in sorted(status.unknown + status.ignored):
2474 if not noop:
2477 if not noop:
2475 repo.ui.note(_(b'removing file %s\n') % f)
2478 repo.ui.note(_(b'removing file %s\n') % f)
2476 remove(repo.wvfs.unlink, f)
2479 remove(repo.wvfs.unlink, f)
2477 res.append(f)
2480 res.append(f)
2478
2481
2479 if removeemptydirs:
2482 if removeemptydirs:
2480 for f in sorted(directories, reverse=True):
2483 for f in sorted(directories, reverse=True):
2481 if matcher(f) and not repo.wvfs.listdir(f):
2484 if matcher(f) and not repo.wvfs.listdir(f):
2482 if not noop:
2485 if not noop:
2483 repo.ui.note(_(b'removing directory %s\n') % f)
2486 repo.ui.note(_(b'removing directory %s\n') % f)
2484 remove(repo.wvfs.rmdir, f)
2487 remove(repo.wvfs.rmdir, f)
2485 res.append(f)
2488 res.append(f)
2486
2489
2487 return res
2490 return res
2488
2491
2489 finally:
2492 finally:
2490 matcher.traversedir = oldtraversedir
2493 matcher.traversedir = oldtraversedir
@@ -1,853 +1,883 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import collections
3 import collections
4 import errno
4 import errno
5 import shutil
5 import shutil
6 import struct
6 import struct
7
7
8 from .i18n import _
8 from .i18n import _
9 from .node import (
9 from .node import (
10 bin,
10 bin,
11 hex,
11 hex,
12 nullrev,
12 nullrev,
13 )
13 )
14 from . import (
14 from . import (
15 error,
15 error,
16 filemerge,
16 filemerge,
17 pycompat,
17 pycompat,
18 util,
18 util,
19 )
19 )
20 from .utils import hashutil
20 from .utils import hashutil
21
21
22 _pack = struct.pack
22 _pack = struct.pack
23 _unpack = struct.unpack
23 _unpack = struct.unpack
24
24
25
25
26 def _droponode(data):
26 def _droponode(data):
27 # used for compatibility for v1
27 # used for compatibility for v1
28 bits = data.split(b'\0')
28 bits = data.split(b'\0')
29 bits = bits[:-2] + bits[-1:]
29 bits = bits[:-2] + bits[-1:]
30 return b'\0'.join(bits)
30 return b'\0'.join(bits)
31
31
32
32
33 def _filectxorabsent(hexnode, ctx, f):
33 def _filectxorabsent(hexnode, ctx, f):
34 if hexnode == ctx.repo().nodeconstants.nullhex:
34 if hexnode == ctx.repo().nodeconstants.nullhex:
35 return filemerge.absentfilectx(ctx, f)
35 return filemerge.absentfilectx(ctx, f)
36 else:
36 else:
37 return ctx[f]
37 return ctx[f]
38
38
39
39
40 # Merge state record types. See ``mergestate`` docs for more.
40 # Merge state record types. See ``mergestate`` docs for more.
41
41
42 ####
42 ####
43 # merge records which records metadata about a current merge
43 # merge records which records metadata about a current merge
44 # exists only once in a mergestate
44 # exists only once in a mergestate
45 #####
45 #####
46 RECORD_LOCAL = b'L'
46 RECORD_LOCAL = b'L'
47 RECORD_OTHER = b'O'
47 RECORD_OTHER = b'O'
48 # record merge labels
48 # record merge labels
49 RECORD_LABELS = b'l'
49 RECORD_LABELS = b'l'
50
50
51 #####
51 #####
52 # record extra information about files, with one entry containing info about one
52 # record extra information about files, with one entry containing info about one
53 # file. Hence, multiple of them can exists
53 # file. Hence, multiple of them can exists
54 #####
54 #####
55 RECORD_FILE_VALUES = b'f'
55 RECORD_FILE_VALUES = b'f'
56
56
57 #####
57 #####
58 # merge records which represents state of individual merges of files/folders
58 # merge records which represents state of individual merges of files/folders
59 # These are top level records for each entry containing merge related info.
59 # These are top level records for each entry containing merge related info.
60 # Each record of these has info about one file. Hence multiple of them can
60 # Each record of these has info about one file. Hence multiple of them can
61 # exists
61 # exists
62 #####
62 #####
63 RECORD_MERGED = b'F'
63 RECORD_MERGED = b'F'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 # the path was dir on one side of merge and file on another
65 # the path was dir on one side of merge and file on another
66 RECORD_PATH_CONFLICT = b'P'
66 RECORD_PATH_CONFLICT = b'P'
67
67
68 #####
68 #####
69 # possible state which a merge entry can have. These are stored inside top-level
69 # possible state which a merge entry can have. These are stored inside top-level
70 # merge records mentioned just above.
70 # merge records mentioned just above.
71 #####
71 #####
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 # represents that the file was automatically merged in favor
76 # represents that the file was automatically merged in favor
77 # of other version. This info is used on commit.
77 # of other version. This info is used on commit.
78 # This is now deprecated and commit related information is now
78 # This is now deprecated and commit related information is now
79 # stored in RECORD_FILE_VALUES
79 # stored in RECORD_FILE_VALUES
80 MERGE_RECORD_MERGED_OTHER = b'o'
80 MERGE_RECORD_MERGED_OTHER = b'o'
81
81
82 #####
82 #####
83 # top level record which stores other unknown records. Multiple of these can
83 # top level record which stores other unknown records. Multiple of these can
84 # exists
84 # exists
85 #####
85 #####
86 RECORD_OVERRIDE = b't'
86 RECORD_OVERRIDE = b't'
87
87
88 #####
88 #####
89 # legacy records which are no longer used but kept to prevent breaking BC
89 # legacy records which are no longer used but kept to prevent breaking BC
90 #####
90 #####
91 # This record was release in 5.4 and usage was removed in 5.5
91 # This record was release in 5.4 and usage was removed in 5.5
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 # This record was release in 3.7 and usage was removed in 5.6
93 # This record was release in 3.7 and usage was removed in 5.6
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 # This record was release in 3.7 and usage was removed in 5.6
95 # This record was release in 3.7 and usage was removed in 5.6
96 LEGACY_MERGE_DRIVER_STATE = b'm'
96 LEGACY_MERGE_DRIVER_STATE = b'm'
97 # This record was release in 3.7 and usage was removed in 5.6
97 # This record was release in 3.7 and usage was removed in 5.6
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
99
99
100
100
101 ACTION_FORGET = b'f'
101 class MergeAction(object):
102 ACTION_REMOVE = b'r'
102 """represent an "action" merge need to take for a given file
103 ACTION_ADD = b'a'
103
104 ACTION_GET = b'g'
104 Attributes:
105 ACTION_PATH_CONFLICT = b'p'
105
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
106 _short: internal representation used to identify each action
107 ACTION_ADD_MODIFIED = b'am'
107 """
108 ACTION_CREATED = b'c'
108
109 ACTION_DELETED_CHANGED = b'dc'
109 def __init__(self, short):
110 ACTION_CHANGED_DELETED = b'cd'
110 self._short = short
111 ACTION_MERGE = b'm'
111
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
112 def __hash__(self):
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
113 return hash(self._short)
114 ACTION_KEEP = b'k'
114
115 def __repr__(self):
116 return 'MergeAction<%s>' % self._short.decode('ascii')
117
118 def __bytes__(self):
119 return self._short
120
121 def __eq__(self, other):
122 if other is None:
123 return False
124 assert isinstance(other, MergeAction)
125 return self._short == other._short
126
127 def __lt__(self, other):
128 return self._short < other._short
129
130
131 ACTION_FORGET = MergeAction(b'f')
132 ACTION_REMOVE = MergeAction(b'r')
133 ACTION_ADD = MergeAction(b'a')
134 ACTION_GET = MergeAction(b'g')
135 ACTION_PATH_CONFLICT = MergeAction(b'p')
136 ACTION_PATH_CONFLICT_RESOLVE = MergeAction('pr')
137 ACTION_ADD_MODIFIED = MergeAction(b'am')
138 ACTION_CREATED = MergeAction(b'c')
139 ACTION_DELETED_CHANGED = MergeAction(b'dc')
140 ACTION_CHANGED_DELETED = MergeAction(b'cd')
141 ACTION_MERGE = MergeAction(b'm')
142 ACTION_LOCAL_DIR_RENAME_GET = MergeAction(b'dg')
143 ACTION_DIR_RENAME_MOVE_LOCAL = MergeAction(b'dm')
144 ACTION_KEEP = MergeAction(b'k')
115 # the file was absent on local side before merge and we should
145 # the file was absent on local side before merge and we should
116 # keep it absent (absent means file not present, it can be a result
146 # keep it absent (absent means file not present, it can be a result
117 # of file deletion, rename etc.)
147 # of file deletion, rename etc.)
118 ACTION_KEEP_ABSENT = b'ka'
148 ACTION_KEEP_ABSENT = MergeAction(b'ka')
119 # the file is absent on the ancestor and remote side of the merge
149 # the file is absent on the ancestor and remote side of the merge
120 # hence this file is new and we should keep it
150 # hence this file is new and we should keep it
121 ACTION_KEEP_NEW = b'kn'
151 ACTION_KEEP_NEW = MergeAction(b'kn')
122 ACTION_EXEC = b'e'
152 ACTION_EXEC = MergeAction(b'e')
123 ACTION_CREATED_MERGE = b'cm'
153 ACTION_CREATED_MERGE = MergeAction(b'cm')
124
154
125 # actions which are no op
155 # actions which are no op
126 NO_OP_ACTIONS = (
156 NO_OP_ACTIONS = (
127 ACTION_KEEP,
157 ACTION_KEEP,
128 ACTION_KEEP_ABSENT,
158 ACTION_KEEP_ABSENT,
129 ACTION_KEEP_NEW,
159 ACTION_KEEP_NEW,
130 )
160 )
131
161
132 # Used by concert to detect situation it does not like, not sure what the exact
162 # Used by concert to detect situation it does not like, not sure what the exact
133 # criteria is
163 # criteria is
134 CONVERT_MERGE_ACTIONS = (
164 CONVERT_MERGE_ACTIONS = (
135 ACTION_MERGE,
165 ACTION_MERGE,
136 ACTION_DIR_RENAME_MOVE_LOCAL,
166 ACTION_DIR_RENAME_MOVE_LOCAL,
137 ACTION_CHANGED_DELETED,
167 ACTION_CHANGED_DELETED,
138 ACTION_DELETED_CHANGED,
168 ACTION_DELETED_CHANGED,
139 )
169 )
140
170
141
171
142 class _mergestate_base(object):
172 class _mergestate_base(object):
143 """track 3-way merge state of individual files
173 """track 3-way merge state of individual files
144
174
145 The merge state is stored on disk when needed. Two files are used: one with
175 The merge state is stored on disk when needed. Two files are used: one with
146 an old format (version 1), and one with a new format (version 2). Version 2
176 an old format (version 1), and one with a new format (version 2). Version 2
147 stores a superset of the data in version 1, including new kinds of records
177 stores a superset of the data in version 1, including new kinds of records
148 in the future. For more about the new format, see the documentation for
178 in the future. For more about the new format, see the documentation for
149 `_readrecordsv2`.
179 `_readrecordsv2`.
150
180
151 Each record can contain arbitrary content, and has an associated type. This
181 Each record can contain arbitrary content, and has an associated type. This
152 `type` should be a letter. If `type` is uppercase, the record is mandatory:
182 `type` should be a letter. If `type` is uppercase, the record is mandatory:
153 versions of Mercurial that don't support it should abort. If `type` is
183 versions of Mercurial that don't support it should abort. If `type` is
154 lowercase, the record can be safely ignored.
184 lowercase, the record can be safely ignored.
155
185
156 Currently known records:
186 Currently known records:
157
187
158 L: the node of the "local" part of the merge (hexified version)
188 L: the node of the "local" part of the merge (hexified version)
159 O: the node of the "other" part of the merge (hexified version)
189 O: the node of the "other" part of the merge (hexified version)
160 F: a file to be merged entry
190 F: a file to be merged entry
161 C: a change/delete or delete/change conflict
191 C: a change/delete or delete/change conflict
162 P: a path conflict (file vs directory)
192 P: a path conflict (file vs directory)
163 f: a (filename, dictionary) tuple of optional values for a given file
193 f: a (filename, dictionary) tuple of optional values for a given file
164 l: the labels for the parts of the merge.
194 l: the labels for the parts of the merge.
165
195
166 Merge record states (stored in self._state, indexed by filename):
196 Merge record states (stored in self._state, indexed by filename):
167 u: unresolved conflict
197 u: unresolved conflict
168 r: resolved conflict
198 r: resolved conflict
169 pu: unresolved path conflict (file conflicts with directory)
199 pu: unresolved path conflict (file conflicts with directory)
170 pr: resolved path conflict
200 pr: resolved path conflict
171 o: file was merged in favor of other parent of merge (DEPRECATED)
201 o: file was merged in favor of other parent of merge (DEPRECATED)
172
202
173 The resolve command transitions between 'u' and 'r' for conflicts and
203 The resolve command transitions between 'u' and 'r' for conflicts and
174 'pu' and 'pr' for path conflicts.
204 'pu' and 'pr' for path conflicts.
175 """
205 """
176
206
177 def __init__(self, repo):
207 def __init__(self, repo):
178 """Initialize the merge state.
208 """Initialize the merge state.
179
209
180 Do not use this directly! Instead call read() or clean()."""
210 Do not use this directly! Instead call read() or clean()."""
181 self._repo = repo
211 self._repo = repo
182 self._state = {}
212 self._state = {}
183 self._stateextras = collections.defaultdict(dict)
213 self._stateextras = collections.defaultdict(dict)
184 self._local = None
214 self._local = None
185 self._other = None
215 self._other = None
186 self._labels = None
216 self._labels = None
187 # contains a mapping of form:
217 # contains a mapping of form:
188 # {filename : (merge_return_value, action_to_be_performed}
218 # {filename : (merge_return_value, action_to_be_performed}
189 # these are results of re-running merge process
219 # these are results of re-running merge process
190 # this dict is used to perform actions on dirstate caused by re-running
220 # this dict is used to perform actions on dirstate caused by re-running
191 # the merge
221 # the merge
192 self._results = {}
222 self._results = {}
193 self._dirty = False
223 self._dirty = False
194
224
195 def reset(self):
225 def reset(self):
196 pass
226 pass
197
227
198 def start(self, node, other, labels=None):
228 def start(self, node, other, labels=None):
199 self._local = node
229 self._local = node
200 self._other = other
230 self._other = other
201 self._labels = labels
231 self._labels = labels
202
232
203 @util.propertycache
233 @util.propertycache
204 def local(self):
234 def local(self):
205 if self._local is None:
235 if self._local is None:
206 msg = b"local accessed but self._local isn't set"
236 msg = b"local accessed but self._local isn't set"
207 raise error.ProgrammingError(msg)
237 raise error.ProgrammingError(msg)
208 return self._local
238 return self._local
209
239
210 @util.propertycache
240 @util.propertycache
211 def localctx(self):
241 def localctx(self):
212 return self._repo[self.local]
242 return self._repo[self.local]
213
243
214 @util.propertycache
244 @util.propertycache
215 def other(self):
245 def other(self):
216 if self._other is None:
246 if self._other is None:
217 msg = b"other accessed but self._other isn't set"
247 msg = b"other accessed but self._other isn't set"
218 raise error.ProgrammingError(msg)
248 raise error.ProgrammingError(msg)
219 return self._other
249 return self._other
220
250
221 @util.propertycache
251 @util.propertycache
222 def otherctx(self):
252 def otherctx(self):
223 return self._repo[self.other]
253 return self._repo[self.other]
224
254
225 def active(self):
255 def active(self):
226 """Whether mergestate is active.
256 """Whether mergestate is active.
227
257
228 Returns True if there appears to be mergestate. This is a rough proxy
258 Returns True if there appears to be mergestate. This is a rough proxy
229 for "is a merge in progress."
259 for "is a merge in progress."
230 """
260 """
231 return bool(self._local) or bool(self._state)
261 return bool(self._local) or bool(self._state)
232
262
233 def commit(self):
263 def commit(self):
234 """Write current state on disk (if necessary)"""
264 """Write current state on disk (if necessary)"""
235
265
236 @staticmethod
266 @staticmethod
237 def getlocalkey(path):
267 def getlocalkey(path):
238 """hash the path of a local file context for storage in the .hg/merge
268 """hash the path of a local file context for storage in the .hg/merge
239 directory."""
269 directory."""
240
270
241 return hex(hashutil.sha1(path).digest())
271 return hex(hashutil.sha1(path).digest())
242
272
243 def _make_backup(self, fctx, localkey):
273 def _make_backup(self, fctx, localkey):
244 raise NotImplementedError()
274 raise NotImplementedError()
245
275
246 def _restore_backup(self, fctx, localkey, flags):
276 def _restore_backup(self, fctx, localkey, flags):
247 raise NotImplementedError()
277 raise NotImplementedError()
248
278
249 def add(self, fcl, fco, fca, fd):
279 def add(self, fcl, fco, fca, fd):
250 """add a new (potentially?) conflicting file the merge state
280 """add a new (potentially?) conflicting file the merge state
251 fcl: file context for local,
281 fcl: file context for local,
252 fco: file context for remote,
282 fco: file context for remote,
253 fca: file context for ancestors,
283 fca: file context for ancestors,
254 fd: file path of the resulting merge.
284 fd: file path of the resulting merge.
255
285
256 note: also write the local version to the `.hg/merge` directory.
286 note: also write the local version to the `.hg/merge` directory.
257 """
287 """
258 if fcl.isabsent():
288 if fcl.isabsent():
259 localkey = self._repo.nodeconstants.nullhex
289 localkey = self._repo.nodeconstants.nullhex
260 else:
290 else:
261 localkey = mergestate.getlocalkey(fcl.path())
291 localkey = mergestate.getlocalkey(fcl.path())
262 self._make_backup(fcl, localkey)
292 self._make_backup(fcl, localkey)
263 self._state[fd] = [
293 self._state[fd] = [
264 MERGE_RECORD_UNRESOLVED,
294 MERGE_RECORD_UNRESOLVED,
265 localkey,
295 localkey,
266 fcl.path(),
296 fcl.path(),
267 fca.path(),
297 fca.path(),
268 hex(fca.filenode()),
298 hex(fca.filenode()),
269 fco.path(),
299 fco.path(),
270 hex(fco.filenode()),
300 hex(fco.filenode()),
271 fcl.flags(),
301 fcl.flags(),
272 ]
302 ]
273 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
303 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
274 self._dirty = True
304 self._dirty = True
275
305
276 def addpathconflict(self, path, frename, forigin):
306 def addpathconflict(self, path, frename, forigin):
277 """add a new conflicting path to the merge state
307 """add a new conflicting path to the merge state
278 path: the path that conflicts
308 path: the path that conflicts
279 frename: the filename the conflicting file was renamed to
309 frename: the filename the conflicting file was renamed to
280 forigin: origin of the file ('l' or 'r' for local/remote)
310 forigin: origin of the file ('l' or 'r' for local/remote)
281 """
311 """
282 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
312 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
283 self._dirty = True
313 self._dirty = True
284
314
285 def addcommitinfo(self, path, data):
315 def addcommitinfo(self, path, data):
286 """stores information which is required at commit
316 """stores information which is required at commit
287 into _stateextras"""
317 into _stateextras"""
288 self._stateextras[path].update(data)
318 self._stateextras[path].update(data)
289 self._dirty = True
319 self._dirty = True
290
320
291 def __contains__(self, dfile):
321 def __contains__(self, dfile):
292 return dfile in self._state
322 return dfile in self._state
293
323
294 def __getitem__(self, dfile):
324 def __getitem__(self, dfile):
295 return self._state[dfile][0]
325 return self._state[dfile][0]
296
326
297 def __iter__(self):
327 def __iter__(self):
298 return iter(sorted(self._state))
328 return iter(sorted(self._state))
299
329
300 def files(self):
330 def files(self):
301 return self._state.keys()
331 return self._state.keys()
302
332
303 def mark(self, dfile, state):
333 def mark(self, dfile, state):
304 self._state[dfile][0] = state
334 self._state[dfile][0] = state
305 self._dirty = True
335 self._dirty = True
306
336
307 def unresolved(self):
337 def unresolved(self):
308 """Obtain the paths of unresolved files."""
338 """Obtain the paths of unresolved files."""
309
339
310 for f, entry in pycompat.iteritems(self._state):
340 for f, entry in pycompat.iteritems(self._state):
311 if entry[0] in (
341 if entry[0] in (
312 MERGE_RECORD_UNRESOLVED,
342 MERGE_RECORD_UNRESOLVED,
313 MERGE_RECORD_UNRESOLVED_PATH,
343 MERGE_RECORD_UNRESOLVED_PATH,
314 ):
344 ):
315 yield f
345 yield f
316
346
    def allextras(self):
        """return all extras information stored with the mergestate

        The result maps each filename to its extras dict; callers must not
        assume a private copy — this is the live ``_stateextras`` mapping.
        """
        return self._stateextras
320
350
321 def extras(self, filename):
351 def extras(self, filename):
322 """return extras stored with the mergestate for the given filename"""
352 """return extras stored with the mergestate for the given filename"""
323 return self._stateextras[filename]
353 return self._stateextras[filename]
324
354
    def resolve(self, dfile, wctx):
        """run merge process for dfile

        ``dfile`` is the path of the conflicted file; ``wctx`` is the
        working context the merged result is written into.

        Returns the exit code of the merge."""
        # Nothing to do when the file is already resolved.
        if self[dfile] in (
            MERGE_RECORD_RESOLVED,
            LEGACY_RECORD_DRIVER_RESOLVED,
        ):
            return 0
        stateentry = self._state[dfile]
        # Record layout: resolution state, backup key of the local content,
        # then (name, node) information for the local, ancestor and other
        # sides, plus the local flags.
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        # changectx of the "other" side of the merge
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        # ancestor changectx, when the record captured its linkrev node
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = _filectxorabsent(localkey, wctx, dfile)
        fco = _filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags: merge the executable bit ourselves, but only
        # when no side carries a symlink flag.
        flo = fco.flags()
        fla = fca.flags()
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            if fca.rev() == nullrev and flags != flo:
                # no common ancestor: we cannot pick a side, keep local
                self._repo.ui.warn(
                    _(
                        b'warning: cannot merge flags for %s '
                        b'without common ancestor - keeping local flags\n'
                    )
                    % afile
                )
            elif flags == fla:
                # only the other side changed the flags: take its value
                flags = flo
        # restore local: put the pre-merge local content (or absence) back
        # in the working copy before re-running the file merge
        if localkey != self._repo.nodeconstants.nullhex:
            self._restore_backup(wctx[dfile], localkey, flags)
        else:
            wctx[dfile].remove(ignoremissing=True)
        merge_ret, deleted = filemerge.filemerge(
            self._repo,
            wctx,
            self._local,
            lfile,
            fcd,
            fco,
            fca,
            labels=self._labels,
        )
        if merge_ret is None:
            # If return value of merge is None, then there are no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not merge_ret:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        # Derive the dirstate action implied by the merge outcome.
        action = None
        if deleted:
            if fcd.isabsent():
                # dc: local picked. Need to drop if present, which may
                # happen on re-resolves.
                action = ACTION_FORGET
            else:
                # cd: remote picked (or otherwise deleted)
                action = ACTION_REMOVE
        else:
            if fcd.isabsent():  # dc: remote picked
                action = ACTION_GET
            elif fco.isabsent():  # cd: local picked
                if dfile in self.localctx:
                    action = ACTION_ADD_MODIFIED
                else:
                    action = ACTION_ADD
            # else: regular merges (no action necessary)
        # remember the outcome so actions() and counts() can report it later
        self._results[dfile] = merge_ret, action

        return merge_ret
404
434
405 def counts(self):
435 def counts(self):
406 """return counts for updated, merged and removed files in this
436 """return counts for updated, merged and removed files in this
407 session"""
437 session"""
408 updated, merged, removed = 0, 0, 0
438 updated, merged, removed = 0, 0, 0
409 for r, action in pycompat.itervalues(self._results):
439 for r, action in pycompat.itervalues(self._results):
410 if r is None:
440 if r is None:
411 updated += 1
441 updated += 1
412 elif r == 0:
442 elif r == 0:
413 if action == ACTION_REMOVE:
443 if action == ACTION_REMOVE:
414 removed += 1
444 removed += 1
415 else:
445 else:
416 merged += 1
446 merged += 1
417 return updated, merged, removed
447 return updated, merged, removed
418
448
419 def unresolvedcount(self):
449 def unresolvedcount(self):
420 """get unresolved count for this merge (persistent)"""
450 """get unresolved count for this merge (persistent)"""
421 return len(list(self.unresolved()))
451 return len(list(self.unresolved()))
422
452
423 def actions(self):
453 def actions(self):
424 """return lists of actions to perform on the dirstate"""
454 """return lists of actions to perform on the dirstate"""
425 actions = {
455 actions = {
426 ACTION_REMOVE: [],
456 ACTION_REMOVE: [],
427 ACTION_FORGET: [],
457 ACTION_FORGET: [],
428 ACTION_ADD: [],
458 ACTION_ADD: [],
429 ACTION_ADD_MODIFIED: [],
459 ACTION_ADD_MODIFIED: [],
430 ACTION_GET: [],
460 ACTION_GET: [],
431 }
461 }
432 for f, (r, action) in pycompat.iteritems(self._results):
462 for f, (r, action) in pycompat.iteritems(self._results):
433 if action is not None:
463 if action is not None:
434 actions[action].append((f, None, b"merge result"))
464 actions[action].append((f, None, b"merge result"))
435 return actions
465 return actions
436
466
437
467
class mergestate(_mergestate_base):
    """merge state persisted on disk via the repo vfs, under ``merge/``

    Two files are maintained: ``merge/state`` (v1, readable by clients
    older than 2.9.1) and ``merge/state2`` (v2, record based).  See
    ``_readrecords`` for how the two are reconciled on read.
    """

    statepathv1 = b'merge/state'
    statepathv2 = b'merge/state2'

    @staticmethod
    def clean(repo):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset()
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == LEGACY_MERGE_DRIVER_STATE:
                # legacy merge-driver state: ignored
                pass
            elif rtype in (
                RECORD_MERGED,
                RECORD_CHANGEDELETE_CONFLICT,
                RECORD_PATH_CONFLICT,
                LEGACY_MERGE_DRIVER_MERGE,
                LEGACY_RECORD_RESOLVED_OTHER,
            ):
                bits = record.split(b'\0')
                # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
                # and we now store related information in _stateextras, so
                # lets write to _stateextras directly
                if bits[1] == MERGE_RECORD_MERGED_OTHER:
                    self._stateextras[bits[0]][b'filenode-source'] = b'other'
                else:
                    self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                # extras are serialized as alternating key/value fields
                filename, rawextras = record.split(b'\0', 1)
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # capital-letter record types are mandatory: refuse to
                # proceed when we do not understand one
                unsupported.add(rtype)

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split(b'\0')
                    bits.insert(-2, b'')
                    v1records[idx] = (r[0], b'\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Tell whether every v1 record is also present in the v2 records.

        True means v2 can be trusted; the v2-only "other file node" field of
        merged records is ignored for the comparison since v1 cannot store it.
        """
        oldv2 = set()  # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            # for/else: reached only when no v1 record contradicted v2
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            # line 1 is the local node, every further line a merged file
            for i, l in enumerate(f):
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means no merge in progress
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off : off + 1]
                off += 1
                # big-endian 4-byte payload length follows the type byte
                length = _unpack(b'>I', data[off : (off + 4)])[0]
                off += 4
                record = data[off : (off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    # 't' wrapper: real type is the first payload byte
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # a missing state file simply means no merge in progress
            if err.errno != errno.ENOENT:
                raise
        return records

    def commit(self):
        """Write the merge state to disk if it changed since it was loaded."""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        """Assemble the list of (TYPE, data) records describing this state."""
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif (
                v[1] == self._repo.nodeconstants.nullhex
                or v[6] == self._repo.nodeconstants.nullhex
            ):
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file

        Only the local node and merged-file records can be represented
        in this legacy line-based format; everything else is dropped.
        """
        f = self._repo.vfs(self.statepathv1, b'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + b'\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write(b'%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
        f = self._repo.vfs(self.statepathv2, b'wb')
        for key, data in records:
            assert len(key) == 1
            if key not in allowlist:
                # wrap unknown types in an advisory 't' record (see above)
                key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
            format = b'>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def _make_backup(self, fctx, localkey):
        """Store a copy of the local file content under ``merge/<localkey>``."""
        self._repo.vfs.write(b'merge/' + localkey, fctx.data())

    def _restore_backup(self, fctx, localkey, flags):
        """Restore the content saved by ``_make_backup`` with ``flags``."""
        with self._repo.vfs(b'merge/' + localkey) as f:
            fctx.write(f.read(), flags)

    def reset(self):
        """Forget any in-progress merge by removing its on-disk state."""
        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
709
739
710
740
class memmergestate(_mergestate_base):
    """Merge state variant that never touches disk.

    Pre-merge file backups are kept in a plain dict keyed by the local
    node hex, instead of being written under ``merge/`` in the vfs.
    """

    def __init__(self, repo):
        super(memmergestate, self).__init__(repo)
        self._backups = dict()

    def _make_backup(self, fctx, localkey):
        # Snapshot the current content so resolve() can restore it later.
        content = fctx.data()
        self._backups[localkey] = content

    def _restore_backup(self, fctx, localkey, flags):
        # Write the snapshotted content back with the requested flags.
        fctx.write(self._backups[localkey], flags)
721
751
722
752
723 def recordupdates(repo, actions, branchmerge, getfiledata):
753 def recordupdates(repo, actions, branchmerge, getfiledata):
724 """record merge actions to the dirstate"""
754 """record merge actions to the dirstate"""
725 # remove (must come first)
755 # remove (must come first)
726 for f, args, msg in actions.get(ACTION_REMOVE, []):
756 for f, args, msg in actions.get(ACTION_REMOVE, []):
727 if branchmerge:
757 if branchmerge:
728 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
758 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
729 else:
759 else:
730 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
760 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
731
761
732 # forget (must come first)
762 # forget (must come first)
733 for f, args, msg in actions.get(ACTION_FORGET, []):
763 for f, args, msg in actions.get(ACTION_FORGET, []):
734 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
764 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
735
765
736 # resolve path conflicts
766 # resolve path conflicts
737 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
767 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
738 (f0, origf0) = args
768 (f0, origf0) = args
739 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
769 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
740 repo.dirstate.copy(origf0, f)
770 repo.dirstate.copy(origf0, f)
741 if f0 == origf0:
771 if f0 == origf0:
742 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
772 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
743 else:
773 else:
744 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
774 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
745
775
746 # re-add
776 # re-add
747 for f, args, msg in actions.get(ACTION_ADD, []):
777 for f, args, msg in actions.get(ACTION_ADD, []):
748 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
778 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
749
779
750 # re-add/mark as modified
780 # re-add/mark as modified
751 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
781 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
752 if branchmerge:
782 if branchmerge:
753 repo.dirstate.update_file(
783 repo.dirstate.update_file(
754 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
784 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
755 )
785 )
756 else:
786 else:
757 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
787 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
758
788
759 # exec change
789 # exec change
760 for f, args, msg in actions.get(ACTION_EXEC, []):
790 for f, args, msg in actions.get(ACTION_EXEC, []):
761 repo.dirstate.update_file(
791 repo.dirstate.update_file(
762 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
792 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
763 )
793 )
764
794
765 # keep
795 # keep
766 for f, args, msg in actions.get(ACTION_KEEP, []):
796 for f, args, msg in actions.get(ACTION_KEEP, []):
767 pass
797 pass
768
798
769 # keep deleted
799 # keep deleted
770 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
800 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
771 pass
801 pass
772
802
773 # keep new
803 # keep new
774 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
804 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
775 pass
805 pass
776
806
777 # get
807 # get
778 for f, args, msg in actions.get(ACTION_GET, []):
808 for f, args, msg in actions.get(ACTION_GET, []):
779 if branchmerge:
809 if branchmerge:
780 # tracked in p1 can be True also but update_file should not care
810 # tracked in p1 can be True also but update_file should not care
781 old_entry = repo.dirstate.get_entry(f)
811 old_entry = repo.dirstate.get_entry(f)
782 p1_tracked = old_entry.any_tracked and not old_entry.added
812 p1_tracked = old_entry.any_tracked and not old_entry.added
783 repo.dirstate.update_file(
813 repo.dirstate.update_file(
784 f,
814 f,
785 p1_tracked=p1_tracked,
815 p1_tracked=p1_tracked,
786 wc_tracked=True,
816 wc_tracked=True,
787 p2_info=True,
817 p2_info=True,
788 )
818 )
789 else:
819 else:
790 parentfiledata = getfiledata[f] if getfiledata else None
820 parentfiledata = getfiledata[f] if getfiledata else None
791 repo.dirstate.update_file(
821 repo.dirstate.update_file(
792 f,
822 f,
793 p1_tracked=True,
823 p1_tracked=True,
794 wc_tracked=True,
824 wc_tracked=True,
795 parentfiledata=parentfiledata,
825 parentfiledata=parentfiledata,
796 )
826 )
797
827
798 # merge
828 # merge
799 for f, args, msg in actions.get(ACTION_MERGE, []):
829 for f, args, msg in actions.get(ACTION_MERGE, []):
800 f1, f2, fa, move, anc = args
830 f1, f2, fa, move, anc = args
801 if branchmerge:
831 if branchmerge:
802 # We've done a branch merge, mark this file as merged
832 # We've done a branch merge, mark this file as merged
803 # so that we properly record the merger later
833 # so that we properly record the merger later
804 p1_tracked = f1 == f
834 p1_tracked = f1 == f
805 repo.dirstate.update_file(
835 repo.dirstate.update_file(
806 f,
836 f,
807 p1_tracked=p1_tracked,
837 p1_tracked=p1_tracked,
808 wc_tracked=True,
838 wc_tracked=True,
809 p2_info=True,
839 p2_info=True,
810 )
840 )
811 if f1 != f2: # copy/rename
841 if f1 != f2: # copy/rename
812 if move:
842 if move:
813 repo.dirstate.update_file(
843 repo.dirstate.update_file(
814 f1, p1_tracked=True, wc_tracked=False
844 f1, p1_tracked=True, wc_tracked=False
815 )
845 )
816 if f1 != f:
846 if f1 != f:
817 repo.dirstate.copy(f1, f)
847 repo.dirstate.copy(f1, f)
818 else:
848 else:
819 repo.dirstate.copy(f2, f)
849 repo.dirstate.copy(f2, f)
820 else:
850 else:
821 # We've update-merged a locally modified file, so
851 # We've update-merged a locally modified file, so
822 # we set the dirstate to emulate a normal checkout
852 # we set the dirstate to emulate a normal checkout
823 # of that file some time in the past. Thus our
853 # of that file some time in the past. Thus our
824 # merge will appear as a normal local file
854 # merge will appear as a normal local file
825 # modification.
855 # modification.
826 if f2 == f: # file not locally copied/moved
856 if f2 == f: # file not locally copied/moved
827 repo.dirstate.update_file(
857 repo.dirstate.update_file(
828 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
858 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
829 )
859 )
830 if move:
860 if move:
831 repo.dirstate.update_file(
861 repo.dirstate.update_file(
832 f1, p1_tracked=False, wc_tracked=False
862 f1, p1_tracked=False, wc_tracked=False
833 )
863 )
834
864
835 # directory rename, move local
865 # directory rename, move local
836 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
866 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
837 f0, flag = args
867 f0, flag = args
838 if branchmerge:
868 if branchmerge:
839 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
869 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
840 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
870 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
841 repo.dirstate.copy(f0, f)
871 repo.dirstate.copy(f0, f)
842 else:
872 else:
843 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
873 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
844 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
874 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
845
875
846 # directory rename, get
876 # directory rename, get
847 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
877 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
848 f0, flag = args
878 f0, flag = args
849 if branchmerge:
879 if branchmerge:
850 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
880 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
851 repo.dirstate.copy(f0, f)
881 repo.dirstate.copy(f0, f)
852 else:
882 else:
853 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
883 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
General Comments 0
You need to be logged in to leave comments. Login now