##// END OF EJS Templates
filemerge: stop returning always-`True` value...
Martin von Zweigbergk -
r49337:608a35db default
parent child Browse files
Show More
@@ -1,1855 +1,1855 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 url as urlmod,
40 url as urlmod,
41 util,
41 util,
42 )
42 )
43
43
44 from mercurial.upgrade_utils import (
44 from mercurial.upgrade_utils import (
45 actions as upgrade_actions,
45 actions as upgrade_actions,
46 )
46 )
47
47
48 from . import (
48 from . import (
49 lfcommands,
49 lfcommands,
50 lfutil,
50 lfutil,
51 storefactory,
51 storefactory,
52 )
52 )
53
53
54 eh = exthelper.exthelper()
54 eh = exthelper.exthelper()
55
55
56 lfstatus = lfutil.lfstatus
56 lfstatus = lfutil.lfstatus
57
57
58 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
58 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
59
59
60 # -- Utility functions: commonly/repeatedly needed functionality ---------------
60 # -- Utility functions: commonly/repeatedly needed functionality ---------------
61
61
62
62
def composelargefilematcher(match, manifest):
    """Return a copy of ``match`` narrowed to only the largefiles.

    A file counts as a largefile when its standin is present in
    ``manifest``.  The returned matcher never reports ``always()``.
    """

    def islfile(f):
        return lfutil.standin(f) in manifest

    narrowed = copy.copy(match)
    narrowed._files = [f for f in narrowed._files if islfile(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    basematchfn = narrowed.matchfn
    narrowed.matchfn = lambda f: islfile(f) and basematchfn(f)
    return narrowed
74
74
75
75
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of ``match`` narrowed to only normal (non-large) files.

    Filters out standins, files whose standin is tracked in ``manifest``,
    and anything listed in ``exclude``.
    """
    skipped = set()
    if exclude is not None:
        skipped.update(exclude)

    def isnormal(f):
        # Not a standin itself, not tracked as a largefile, and not
        # explicitly excluded by the caller.
        return not (
            lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in skipped
        )

    narrowed = copy.copy(match)
    narrowed._files = [f for f in narrowed._files if isnormal(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    basematchfn = narrowed.matchfn
    narrowed.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return narrowed
91
91
92
92
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add the files selected by ``matcher`` as largefiles.

    A matched file becomes a largefile when ``--large`` was given, when it
    is at least the configured minimum size, or when it matches the
    configured ``largefiles.patterns``.  Standins are written and tracked
    under the working-copy lock unless ``--dry-run`` was given.

    Returns a pair ``(added, bad)`` of filename lists.
    """
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Optional pattern-based matcher from the largefiles.patterns config.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            # Files the underlying add() rejected, mapped back from their
            # standin names, but only those the user named explicitly.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
163
163
164
164
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove the largefiles selected by ``matcher``.

    With ``--after`` only already-deleted files are dropped; otherwise
    clean files are removed from disk too.  Modified or added files are
    kept and a warning is issued for each.  Returns a non-zero int when
    anything was warned about, 0 otherwise.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Keep only entries that are actually tracked as largefiles.
    # NOTE: renamed the loop variable from 'list' to avoid shadowing the
    # builtin.
    modified, added, deleted, clean = [
        [f for f in filelist if lfutil.standin(f) in manifest]
        for filelist in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit one warning per file; return 1 when anything was warned about.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
235
235
236
236
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    """Map a standin path to the largefile path it stands in for."""
    # splitstandin() returns None for non-standins, so fall back to path.
    return lfutil.splitstandin(path) or path
242
242
243
243
244 # -- Wrappers: modify existing commands --------------------------------
244 # -- Wrappers: modify existing commands --------------------------------
245
245
246
246
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the mutually exclusive --normal/--large pair, then delegate."""
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
267
267
268
268
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Add largefiles first, then delegate the remaining normal files.

    Returns the combined list of files that could not be added.
    """
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    # Exclude the files just added as largefiles from the normal add.
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    bad.extend(lbad)
    return bad
283
283
284
284
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Remove normal files via ``orig``, then largefiles; combine results."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    lfresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    # A non-zero result from either pass makes the whole command fail.
    return lfresult or normalresult
307
307
308
308
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefiles status reporting enabled."""
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)
313
313
314
314
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run ``hg status`` with largefiles status reporting enabled."""
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)
319
319
320
320
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    """Run the subrepo dirtiness check with largefiles status enabled."""
    with lfstatus(repo._repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
325
325
326
326
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    """Run ``hg log`` with patterns extended to cover largefile standins."""

    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Filesets are left untouched; otherwise rewrite the pattern
            # body through tostandin, preserving any 'kind:' prefix.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Match either the standin's largefile name or the path itself.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            return origmatchfn(f)

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Build the diff-file matcher with the unwrapped matchandpats so
        # case (2) above is unaffected by the override.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
446
446
447
447
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run ``hg verify`` and optionally verify the largefiles too.

    The largefile pass only runs when one of --large/--lfa/--lfc was
    given, and only when the base verify succeeded (result falsy).
    """
    large = opts.pop('large', False)
    # NOTE: renamed from 'all' to avoid shadowing the builtin.
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        result = result or lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
480
480
481
481
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Dump either the largefiles dirstate (--large) or the normal one."""
    large = opts.pop('large', False)
    if large:
        # Present the largefiles dirstate through a stand-in repo object so
        # the original command dumps it instead of repo.dirstate.
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)

        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
496
496
497
497
498 # Before starting the manifest merge, merge.updates will call
498 # Before starting the manifest merge, merge.updates will call
499 # _checkunknownfile to check if there are any files in the merged-in
499 # _checkunknownfile to check if there are any files in the merged-in
500 # changeset that collide with unknown files in the working copy.
500 # changeset that collide with unknown files in the working copy.
501 #
501 #
502 # The largefiles are seen as unknown, so this prevents us from merging
502 # The largefiles are seen as unknown, so this prevents us from merging
503 # in a file 'foo' if we already have a largefile with the same name.
503 # in a file 'foo' if we already have a largefile with the same name.
504 #
504 #
505 # The overridden function filters the unknown files by removing any
505 # The overridden function filters the unknown files by removing any
506 # largefiles. This makes the merge proceed and we can then handle this
506 # largefiles. This makes the merge proceed and we can then handle this
507 # case further in the overridden calculateupdates function below.
507 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Skip the unknown-file collision check for tracked largefiles.

    A largefile in the working copy is tracked via its standin, so it is
    not really an unknown file colliding with the merged-in one.
    """
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
513
513
514
514
515 # The manifest merge handles conflicts on the manifest level. We want
515 # The manifest merge handles conflicts on the manifest level. We want
516 # to handle changes in largefile-ness of files at this level too.
516 # to handle changes in largefile-ness of files at this level too.
517 #
517 #
518 # The strategy is to run the original calculateupdates and then process
518 # The strategy is to run the original calculateupdates and then process
519 # the action list it outputs. There are two cases we need to deal with:
519 # the action list it outputs. There are two cases we need to deal with:
520 #
520 #
521 # 1. Normal file in p1, largefile in p2. Here the largefile is
521 # 1. Normal file in p1, largefile in p2. Here the largefile is
522 # detected via its standin file, which will enter the working copy
522 # detected via its standin file, which will enter the working copy
523 # with a "get" action. It is not "merge" since the standin is all
523 # with a "get" action. It is not "merge" since the standin is all
524 # Mercurial is concerned with at this level -- the link to the
524 # Mercurial is concerned with at this level -- the link to the
525 # existing normal file is not relevant here.
525 # existing normal file is not relevant here.
526 #
526 #
527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
528 # since the largefile will be present in the working copy and
528 # since the largefile will be present in the working copy and
529 # different from the normal file in p2. Mercurial therefore
529 # different from the normal file in p2. Mercurial therefore
530 # triggers a merge action.
530 # triggers a merge action.
531 #
531 #
532 # In both cases, we prompt the user and emit new actions to either
532 # In both cases, we prompt the user and emit new actions to either
533 # remove the standin (if the normal file was kept) or to remove the
533 # remove the standin (if the normal file was kept) or to remove the
534 # normal file and get the standin (if the largefile was kept). The
534 # normal file and get the standin (if the largefile was kept). The
535 # default prompt answer is to use the largefile version since it was
535 # default prompt answer is to use the largefile version since it was
536 # presumably changed on purpose.
536 # presumably changed on purpose.
537 #
537 #
538 # Finally, the merge.applyupdates function will then take care of
538 # Finally, the merge.applyupdates function will then take care of
539 # writing the files into the working copy and lfcommands.updatelfiles
539 # writing the files into the working copy and lfcommands.updatelfiles
540 # will update the largefiles.
540 # will update the largefiles.
541 @eh.wrapfunction(merge, b'calculateupdates')
541 @eh.wrapfunction(merge, b'calculateupdates')
542 def overridecalculateupdates(
542 def overridecalculateupdates(
543 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
543 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
544 ):
544 ):
545 overwrite = force and not branchmerge
545 overwrite = force and not branchmerge
546 mresult = origfn(
546 mresult = origfn(
547 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
547 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
548 )
548 )
549
549
550 if overwrite:
550 if overwrite:
551 return mresult
551 return mresult
552
552
553 # Convert to dictionary with filename as key and action as value.
553 # Convert to dictionary with filename as key and action as value.
554 lfiles = set()
554 lfiles = set()
555 for f in mresult.files():
555 for f in mresult.files():
556 splitstandin = lfutil.splitstandin(f)
556 splitstandin = lfutil.splitstandin(f)
557 if splitstandin is not None and splitstandin in p1:
557 if splitstandin is not None and splitstandin in p1:
558 lfiles.add(splitstandin)
558 lfiles.add(splitstandin)
559 elif lfutil.standin(f) in p1:
559 elif lfutil.standin(f) in p1:
560 lfiles.add(f)
560 lfiles.add(f)
561
561
562 for lfile in sorted(lfiles):
562 for lfile in sorted(lfiles):
563 standin = lfutil.standin(lfile)
563 standin = lfutil.standin(lfile)
564 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
564 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
565 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
565 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
566 if sm in (b'g', b'dc') and lm != b'r':
566 if sm in (b'g', b'dc') and lm != b'r':
567 if sm == b'dc':
567 if sm == b'dc':
568 f1, f2, fa, move, anc = sargs
568 f1, f2, fa, move, anc = sargs
569 sargs = (p2[f2].flags(), False)
569 sargs = (p2[f2].flags(), False)
570 # Case 1: normal file in the working copy, largefile in
570 # Case 1: normal file in the working copy, largefile in
571 # the second parent
571 # the second parent
572 usermsg = (
572 usermsg = (
573 _(
573 _(
574 b'remote turned local normal file %s into a largefile\n'
574 b'remote turned local normal file %s into a largefile\n'
575 b'use (l)argefile or keep (n)ormal file?'
575 b'use (l)argefile or keep (n)ormal file?'
576 b'$$ &Largefile $$ &Normal file'
576 b'$$ &Largefile $$ &Normal file'
577 )
577 )
578 % lfile
578 % lfile
579 )
579 )
580 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
580 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
581 mresult.addfile(lfile, b'r', None, b'replaced by standin')
581 mresult.addfile(lfile, b'r', None, b'replaced by standin')
582 mresult.addfile(standin, b'g', sargs, b'replaces standin')
582 mresult.addfile(standin, b'g', sargs, b'replaces standin')
583 else: # keep local normal file
583 else: # keep local normal file
584 mresult.addfile(lfile, b'k', None, b'replaces standin')
584 mresult.addfile(lfile, b'k', None, b'replaces standin')
585 if branchmerge:
585 if branchmerge:
586 mresult.addfile(
586 mresult.addfile(
587 standin,
587 standin,
588 b'k',
588 b'k',
589 None,
589 None,
590 b'replaced by non-standin',
590 b'replaced by non-standin',
591 )
591 )
592 else:
592 else:
593 mresult.addfile(
593 mresult.addfile(
594 standin,
594 standin,
595 b'r',
595 b'r',
596 None,
596 None,
597 b'replaced by non-standin',
597 b'replaced by non-standin',
598 )
598 )
599 elif lm in (b'g', b'dc') and sm != b'r':
599 elif lm in (b'g', b'dc') and sm != b'r':
600 if lm == b'dc':
600 if lm == b'dc':
601 f1, f2, fa, move, anc = largs
601 f1, f2, fa, move, anc = largs
602 largs = (p2[f2].flags(), False)
602 largs = (p2[f2].flags(), False)
603 # Case 2: largefile in the working copy, normal file in
603 # Case 2: largefile in the working copy, normal file in
604 # the second parent
604 # the second parent
605 usermsg = (
605 usermsg = (
606 _(
606 _(
607 b'remote turned local largefile %s into a normal file\n'
607 b'remote turned local largefile %s into a normal file\n'
608 b'keep (l)argefile or use (n)ormal file?'
608 b'keep (l)argefile or use (n)ormal file?'
609 b'$$ &Largefile $$ &Normal file'
609 b'$$ &Largefile $$ &Normal file'
610 )
610 )
611 % lfile
611 % lfile
612 )
612 )
613 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
613 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
614 if branchmerge:
614 if branchmerge:
615 # largefile can be restored from standin safely
615 # largefile can be restored from standin safely
616 mresult.addfile(
616 mresult.addfile(
617 lfile,
617 lfile,
618 b'k',
618 b'k',
619 None,
619 None,
620 b'replaced by standin',
620 b'replaced by standin',
621 )
621 )
622 mresult.addfile(standin, b'k', None, b'replaces standin')
622 mresult.addfile(standin, b'k', None, b'replaces standin')
623 else:
623 else:
624 # "lfile" should be marked as "removed" without
624 # "lfile" should be marked as "removed" without
625 # removal of itself
625 # removal of itself
626 mresult.addfile(
626 mresult.addfile(
627 lfile,
627 lfile,
628 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
628 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
629 None,
629 None,
630 b'forget non-standin largefile',
630 b'forget non-standin largefile',
631 )
631 )
632
632
633 # linear-merge should treat this largefile as 're-added'
633 # linear-merge should treat this largefile as 're-added'
634 mresult.addfile(standin, b'a', None, b'keep standin')
634 mresult.addfile(standin, b'a', None, b'keep standin')
635 else: # pick remote normal file
635 else: # pick remote normal file
636 mresult.addfile(lfile, b'g', largs, b'replaces standin')
636 mresult.addfile(lfile, b'g', largs, b'replaces standin')
637 mresult.addfile(
637 mresult.addfile(
638 standin,
638 standin,
639 b'r',
639 b'r',
640 None,
640 None,
641 b'replaced by non-standin',
641 b'replaced by non-standin',
642 )
642 )
643
643
644 return mresult
644 return mresult
645
645
646
646
647 @eh.wrapfunction(mergestatemod, b'recordupdates')
647 @eh.wrapfunction(mergestatemod, b'recordupdates')
648 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
648 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
649 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
649 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
650 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
650 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
651 with lfdirstate.parentchange():
651 with lfdirstate.parentchange():
652 for lfile, args, msg in actions[
652 for lfile, args, msg in actions[
653 MERGE_ACTION_LARGEFILE_MARK_REMOVED
653 MERGE_ACTION_LARGEFILE_MARK_REMOVED
654 ]:
654 ]:
655 # this should be executed before 'orig', to execute 'remove'
655 # this should be executed before 'orig', to execute 'remove'
656 # before all other actions
656 # before all other actions
657 repo.dirstate.update_file(
657 repo.dirstate.update_file(
658 lfile, p1_tracked=True, wc_tracked=False
658 lfile, p1_tracked=True, wc_tracked=False
659 )
659 )
660 # make sure lfile doesn't get synclfdirstate'd as normal
660 # make sure lfile doesn't get synclfdirstate'd as normal
661 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
661 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
662 lfdirstate.write(repo.currenttransaction())
662 lfdirstate.write(repo.currenttransaction())
663
663
664 return orig(repo, actions, branchmerge, getfiledata)
664 return orig(repo, actions, branchmerge, getfiledata)
665
665
666
666
667 # Override filemerge to prompt the user about how they wish to merge
667 # Override filemerge to prompt the user about how they wish to merge
668 # largefiles. This will handle identical edits without prompting the user.
668 # largefiles. This will handle identical edits without prompting the user.
669 @eh.wrapfunction(filemerge, b'filemerge')
669 @eh.wrapfunction(filemerge, b'filemerge')
670 def overridefilemerge(
670 def overridefilemerge(
671 origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
671 origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
672 ):
672 ):
673 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
673 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
674 return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)
674 return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)
675
675
676 ahash = lfutil.readasstandin(fca).lower()
676 ahash = lfutil.readasstandin(fca).lower()
677 dhash = lfutil.readasstandin(fcd).lower()
677 dhash = lfutil.readasstandin(fcd).lower()
678 ohash = lfutil.readasstandin(fco).lower()
678 ohash = lfutil.readasstandin(fco).lower()
679 if (
679 if (
680 ohash != ahash
680 ohash != ahash
681 and ohash != dhash
681 and ohash != dhash
682 and (
682 and (
683 dhash == ahash
683 dhash == ahash
684 or repo.ui.promptchoice(
684 or repo.ui.promptchoice(
685 _(
685 _(
686 b'largefile %s has a merge conflict\nancestor was %s\n'
686 b'largefile %s has a merge conflict\nancestor was %s\n'
687 b'you can keep (l)ocal %s or take (o)ther %s.\n'
687 b'you can keep (l)ocal %s or take (o)ther %s.\n'
688 b'what do you want to do?'
688 b'what do you want to do?'
689 b'$$ &Local $$ &Other'
689 b'$$ &Local $$ &Other'
690 )
690 )
691 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
691 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
692 0,
692 0,
693 )
693 )
694 == 1
694 == 1
695 )
695 )
696 ):
696 ):
697 repo.wwrite(fcd.path(), fco.data(), fco.flags())
697 repo.wwrite(fcd.path(), fco.data(), fco.flags())
698 return True, 0, False
698 return 0, False
699
699
700
700
701 @eh.wrapfunction(copiesmod, b'pathcopies')
701 @eh.wrapfunction(copiesmod, b'pathcopies')
702 def copiespathcopies(orig, ctx1, ctx2, match=None):
702 def copiespathcopies(orig, ctx1, ctx2, match=None):
703 copies = orig(ctx1, ctx2, match=match)
703 copies = orig(ctx1, ctx2, match=match)
704 updated = {}
704 updated = {}
705
705
706 for k, v in pycompat.iteritems(copies):
706 for k, v in pycompat.iteritems(copies):
707 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
707 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
708
708
709 return updated
709 return updated
710
710
711
711
712 # Copy first changes the matchers to match standins instead of
712 # Copy first changes the matchers to match standins instead of
713 # largefiles. Then it overrides util.copyfile in that function it
713 # largefiles. Then it overrides util.copyfile in that function it
714 # checks if the destination largefile already exists. It also keeps a
714 # checks if the destination largefile already exists. It also keeps a
715 # list of copied files so that the largefiles can be copied and the
715 # list of copied files so that the largefiles can be copied and the
716 # dirstate updated.
716 # dirstate updated.
717 @eh.wrapfunction(cmdutil, b'copy')
717 @eh.wrapfunction(cmdutil, b'copy')
718 def overridecopy(orig, ui, repo, pats, opts, rename=False):
718 def overridecopy(orig, ui, repo, pats, opts, rename=False):
719 # doesn't remove largefile on rename
719 # doesn't remove largefile on rename
720 if len(pats) < 2:
720 if len(pats) < 2:
721 # this isn't legal, let the original function deal with it
721 # this isn't legal, let the original function deal with it
722 return orig(ui, repo, pats, opts, rename)
722 return orig(ui, repo, pats, opts, rename)
723
723
724 # This could copy both lfiles and normal files in one command,
724 # This could copy both lfiles and normal files in one command,
725 # but we don't want to do that. First replace their matcher to
725 # but we don't want to do that. First replace their matcher to
726 # only match normal files and run it, then replace it to just
726 # only match normal files and run it, then replace it to just
727 # match largefiles and run it again.
727 # match largefiles and run it again.
728 nonormalfiles = False
728 nonormalfiles = False
729 nolfiles = False
729 nolfiles = False
730 manifest = repo[None].manifest()
730 manifest = repo[None].manifest()
731
731
732 def normalfilesmatchfn(
732 def normalfilesmatchfn(
733 orig,
733 orig,
734 ctx,
734 ctx,
735 pats=(),
735 pats=(),
736 opts=None,
736 opts=None,
737 globbed=False,
737 globbed=False,
738 default=b'relpath',
738 default=b'relpath',
739 badfn=None,
739 badfn=None,
740 ):
740 ):
741 if opts is None:
741 if opts is None:
742 opts = {}
742 opts = {}
743 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
743 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
744 return composenormalfilematcher(match, manifest)
744 return composenormalfilematcher(match, manifest)
745
745
746 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
746 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
747 try:
747 try:
748 result = orig(ui, repo, pats, opts, rename)
748 result = orig(ui, repo, pats, opts, rename)
749 except error.Abort as e:
749 except error.Abort as e:
750 if e.message != _(b'no files to copy'):
750 if e.message != _(b'no files to copy'):
751 raise e
751 raise e
752 else:
752 else:
753 nonormalfiles = True
753 nonormalfiles = True
754 result = 0
754 result = 0
755
755
756 # The first rename can cause our current working directory to be removed.
756 # The first rename can cause our current working directory to be removed.
757 # In that case there is nothing left to copy/rename so just quit.
757 # In that case there is nothing left to copy/rename so just quit.
758 try:
758 try:
759 repo.getcwd()
759 repo.getcwd()
760 except OSError:
760 except OSError:
761 return result
761 return result
762
762
763 def makestandin(relpath):
763 def makestandin(relpath):
764 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
764 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
765 return repo.wvfs.join(lfutil.standin(path))
765 return repo.wvfs.join(lfutil.standin(path))
766
766
767 fullpats = scmutil.expandpats(pats)
767 fullpats = scmutil.expandpats(pats)
768 dest = fullpats[-1]
768 dest = fullpats[-1]
769
769
770 if os.path.isdir(dest):
770 if os.path.isdir(dest):
771 if not os.path.isdir(makestandin(dest)):
771 if not os.path.isdir(makestandin(dest)):
772 os.makedirs(makestandin(dest))
772 os.makedirs(makestandin(dest))
773
773
774 try:
774 try:
775 # When we call orig below it creates the standins but we don't add
775 # When we call orig below it creates the standins but we don't add
776 # them to the dir state until later so lock during that time.
776 # them to the dir state until later so lock during that time.
777 wlock = repo.wlock()
777 wlock = repo.wlock()
778
778
779 manifest = repo[None].manifest()
779 manifest = repo[None].manifest()
780
780
781 def overridematch(
781 def overridematch(
782 orig,
782 orig,
783 ctx,
783 ctx,
784 pats=(),
784 pats=(),
785 opts=None,
785 opts=None,
786 globbed=False,
786 globbed=False,
787 default=b'relpath',
787 default=b'relpath',
788 badfn=None,
788 badfn=None,
789 ):
789 ):
790 if opts is None:
790 if opts is None:
791 opts = {}
791 opts = {}
792 newpats = []
792 newpats = []
793 # The patterns were previously mangled to add the standin
793 # The patterns were previously mangled to add the standin
794 # directory; we need to remove that now
794 # directory; we need to remove that now
795 for pat in pats:
795 for pat in pats:
796 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
796 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
797 newpats.append(pat.replace(lfutil.shortname, b''))
797 newpats.append(pat.replace(lfutil.shortname, b''))
798 else:
798 else:
799 newpats.append(pat)
799 newpats.append(pat)
800 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
800 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
801 m = copy.copy(match)
801 m = copy.copy(match)
802 lfile = lambda f: lfutil.standin(f) in manifest
802 lfile = lambda f: lfutil.standin(f) in manifest
803 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
803 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
804 m._fileset = set(m._files)
804 m._fileset = set(m._files)
805 origmatchfn = m.matchfn
805 origmatchfn = m.matchfn
806
806
807 def matchfn(f):
807 def matchfn(f):
808 lfile = lfutil.splitstandin(f)
808 lfile = lfutil.splitstandin(f)
809 return (
809 return (
810 lfile is not None
810 lfile is not None
811 and (f in manifest)
811 and (f in manifest)
812 and origmatchfn(lfile)
812 and origmatchfn(lfile)
813 or None
813 or None
814 )
814 )
815
815
816 m.matchfn = matchfn
816 m.matchfn = matchfn
817 return m
817 return m
818
818
819 listpats = []
819 listpats = []
820 for pat in pats:
820 for pat in pats:
821 if matchmod.patkind(pat) is not None:
821 if matchmod.patkind(pat) is not None:
822 listpats.append(pat)
822 listpats.append(pat)
823 else:
823 else:
824 listpats.append(makestandin(pat))
824 listpats.append(makestandin(pat))
825
825
826 copiedfiles = []
826 copiedfiles = []
827
827
828 def overridecopyfile(orig, src, dest, *args, **kwargs):
828 def overridecopyfile(orig, src, dest, *args, **kwargs):
829 if lfutil.shortname in src and dest.startswith(
829 if lfutil.shortname in src and dest.startswith(
830 repo.wjoin(lfutil.shortname)
830 repo.wjoin(lfutil.shortname)
831 ):
831 ):
832 destlfile = dest.replace(lfutil.shortname, b'')
832 destlfile = dest.replace(lfutil.shortname, b'')
833 if not opts[b'force'] and os.path.exists(destlfile):
833 if not opts[b'force'] and os.path.exists(destlfile):
834 raise IOError(
834 raise IOError(
835 b'', _(b'destination largefile already exists')
835 b'', _(b'destination largefile already exists')
836 )
836 )
837 copiedfiles.append((src, dest))
837 copiedfiles.append((src, dest))
838 orig(src, dest, *args, **kwargs)
838 orig(src, dest, *args, **kwargs)
839
839
840 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
840 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
841 with extensions.wrappedfunction(scmutil, b'match', overridematch):
841 with extensions.wrappedfunction(scmutil, b'match', overridematch):
842 result += orig(ui, repo, listpats, opts, rename)
842 result += orig(ui, repo, listpats, opts, rename)
843
843
844 lfdirstate = lfutil.openlfdirstate(ui, repo)
844 lfdirstate = lfutil.openlfdirstate(ui, repo)
845 for (src, dest) in copiedfiles:
845 for (src, dest) in copiedfiles:
846 if lfutil.shortname in src and dest.startswith(
846 if lfutil.shortname in src and dest.startswith(
847 repo.wjoin(lfutil.shortname)
847 repo.wjoin(lfutil.shortname)
848 ):
848 ):
849 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
849 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
850 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
850 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
851 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
851 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
852 if not os.path.isdir(destlfiledir):
852 if not os.path.isdir(destlfiledir):
853 os.makedirs(destlfiledir)
853 os.makedirs(destlfiledir)
854 if rename:
854 if rename:
855 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
855 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
856
856
857 # The file is gone, but this deletes any empty parent
857 # The file is gone, but this deletes any empty parent
858 # directories as a side-effect.
858 # directories as a side-effect.
859 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
859 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
860 lfdirstate.set_untracked(srclfile)
860 lfdirstate.set_untracked(srclfile)
861 else:
861 else:
862 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
862 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
863
863
864 lfdirstate.set_tracked(destlfile)
864 lfdirstate.set_tracked(destlfile)
865 lfdirstate.write(repo.currenttransaction())
865 lfdirstate.write(repo.currenttransaction())
866 except error.Abort as e:
866 except error.Abort as e:
867 if e.message != _(b'no files to copy'):
867 if e.message != _(b'no files to copy'):
868 raise e
868 raise e
869 else:
869 else:
870 nolfiles = True
870 nolfiles = True
871 finally:
871 finally:
872 wlock.release()
872 wlock.release()
873
873
874 if nolfiles and nonormalfiles:
874 if nolfiles and nonormalfiles:
875 raise error.Abort(_(b'no files to copy'))
875 raise error.Abort(_(b'no files to copy'))
876
876
877 return result
877 return result
878
878
879
879
880 # When the user calls revert, we have to be careful to not revert any
880 # When the user calls revert, we have to be careful to not revert any
881 # changes to other largefiles accidentally. This means we have to keep
881 # changes to other largefiles accidentally. This means we have to keep
882 # track of the largefiles that are being reverted so we only pull down
882 # track of the largefiles that are being reverted so we only pull down
883 # the necessary largefiles.
883 # the necessary largefiles.
884 #
884 #
885 # Standins are only updated (to match the hash of largefiles) before
885 # Standins are only updated (to match the hash of largefiles) before
886 # commits. Update the standins then run the original revert, changing
886 # commits. Update the standins then run the original revert, changing
887 # the matcher to hit standins instead of largefiles. Based on the
887 # the matcher to hit standins instead of largefiles. Based on the
888 # resulting standins update the largefiles.
888 # resulting standins update the largefiles.
889 @eh.wrapfunction(cmdutil, b'revert')
889 @eh.wrapfunction(cmdutil, b'revert')
890 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
890 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
891 # Because we put the standins in a bad state (by updating them)
891 # Because we put the standins in a bad state (by updating them)
892 # and then return them to a correct state we need to lock to
892 # and then return them to a correct state we need to lock to
893 # prevent others from changing them in their incorrect state.
893 # prevent others from changing them in their incorrect state.
894 with repo.wlock():
894 with repo.wlock():
895 lfdirstate = lfutil.openlfdirstate(ui, repo)
895 lfdirstate = lfutil.openlfdirstate(ui, repo)
896 s = lfutil.lfdirstatestatus(lfdirstate, repo)
896 s = lfutil.lfdirstatestatus(lfdirstate, repo)
897 lfdirstate.write(repo.currenttransaction())
897 lfdirstate.write(repo.currenttransaction())
898 for lfile in s.modified:
898 for lfile in s.modified:
899 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
899 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
900 for lfile in s.deleted:
900 for lfile in s.deleted:
901 fstandin = lfutil.standin(lfile)
901 fstandin = lfutil.standin(lfile)
902 if repo.wvfs.exists(fstandin):
902 if repo.wvfs.exists(fstandin):
903 repo.wvfs.unlink(fstandin)
903 repo.wvfs.unlink(fstandin)
904
904
905 oldstandins = lfutil.getstandinsstate(repo)
905 oldstandins = lfutil.getstandinsstate(repo)
906
906
907 def overridematch(
907 def overridematch(
908 orig,
908 orig,
909 mctx,
909 mctx,
910 pats=(),
910 pats=(),
911 opts=None,
911 opts=None,
912 globbed=False,
912 globbed=False,
913 default=b'relpath',
913 default=b'relpath',
914 badfn=None,
914 badfn=None,
915 ):
915 ):
916 if opts is None:
916 if opts is None:
917 opts = {}
917 opts = {}
918 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
918 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
919 m = copy.copy(match)
919 m = copy.copy(match)
920
920
921 # revert supports recursing into subrepos, and though largefiles
921 # revert supports recursing into subrepos, and though largefiles
922 # currently doesn't work correctly in that case, this match is
922 # currently doesn't work correctly in that case, this match is
923 # called, so the lfdirstate above may not be the correct one for
923 # called, so the lfdirstate above may not be the correct one for
924 # this invocation of match.
924 # this invocation of match.
925 lfdirstate = lfutil.openlfdirstate(
925 lfdirstate = lfutil.openlfdirstate(
926 mctx.repo().ui, mctx.repo(), False
926 mctx.repo().ui, mctx.repo(), False
927 )
927 )
928
928
929 wctx = repo[None]
929 wctx = repo[None]
930 matchfiles = []
930 matchfiles = []
931 for f in m._files:
931 for f in m._files:
932 standin = lfutil.standin(f)
932 standin = lfutil.standin(f)
933 if standin in ctx or standin in mctx:
933 if standin in ctx or standin in mctx:
934 matchfiles.append(standin)
934 matchfiles.append(standin)
935 elif standin in wctx or lfdirstate.get_entry(f).removed:
935 elif standin in wctx or lfdirstate.get_entry(f).removed:
936 continue
936 continue
937 else:
937 else:
938 matchfiles.append(f)
938 matchfiles.append(f)
939 m._files = matchfiles
939 m._files = matchfiles
940 m._fileset = set(m._files)
940 m._fileset = set(m._files)
941 origmatchfn = m.matchfn
941 origmatchfn = m.matchfn
942
942
943 def matchfn(f):
943 def matchfn(f):
944 lfile = lfutil.splitstandin(f)
944 lfile = lfutil.splitstandin(f)
945 if lfile is not None:
945 if lfile is not None:
946 return origmatchfn(lfile) and (f in ctx or f in mctx)
946 return origmatchfn(lfile) and (f in ctx or f in mctx)
947 return origmatchfn(f)
947 return origmatchfn(f)
948
948
949 m.matchfn = matchfn
949 m.matchfn = matchfn
950 return m
950 return m
951
951
952 with extensions.wrappedfunction(scmutil, b'match', overridematch):
952 with extensions.wrappedfunction(scmutil, b'match', overridematch):
953 orig(ui, repo, ctx, *pats, **opts)
953 orig(ui, repo, ctx, *pats, **opts)
954
954
955 newstandins = lfutil.getstandinsstate(repo)
955 newstandins = lfutil.getstandinsstate(repo)
956 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
956 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
957 # lfdirstate should be 'normallookup'-ed for updated files,
957 # lfdirstate should be 'normallookup'-ed for updated files,
958 # because reverting doesn't touch dirstate for 'normal' files
958 # because reverting doesn't touch dirstate for 'normal' files
959 # when target revision is explicitly specified: in such case,
959 # when target revision is explicitly specified: in such case,
960 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
960 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
961 # of target (standin) file.
961 # of target (standin) file.
962 lfcommands.updatelfiles(
962 lfcommands.updatelfiles(
963 ui, repo, filelist, printmessage=False, normallookup=True
963 ui, repo, filelist, printmessage=False, normallookup=True
964 )
964 )
965
965
966
966
967 # after pulling changesets, we need to take some extra care to get
967 # after pulling changesets, we need to take some extra care to get
968 # largefiles updated remotely
968 # largefiles updated remotely
969 @eh.wrapcommand(
969 @eh.wrapcommand(
970 b'pull',
970 b'pull',
971 opts=[
971 opts=[
972 (
972 (
973 b'',
973 b'',
974 b'all-largefiles',
974 b'all-largefiles',
975 None,
975 None,
976 _(b'download all pulled versions of largefiles (DEPRECATED)'),
976 _(b'download all pulled versions of largefiles (DEPRECATED)'),
977 ),
977 ),
978 (
978 (
979 b'',
979 b'',
980 b'lfrev',
980 b'lfrev',
981 [],
981 [],
982 _(b'download largefiles for these revisions'),
982 _(b'download largefiles for these revisions'),
983 _(b'REV'),
983 _(b'REV'),
984 ),
984 ),
985 ],
985 ],
986 )
986 )
987 def overridepull(orig, ui, repo, source=None, **opts):
987 def overridepull(orig, ui, repo, source=None, **opts):
988 revsprepull = len(repo)
988 revsprepull = len(repo)
989 if not source:
989 if not source:
990 source = b'default'
990 source = b'default'
991 repo.lfpullsource = source
991 repo.lfpullsource = source
992 result = orig(ui, repo, source, **opts)
992 result = orig(ui, repo, source, **opts)
993 revspostpull = len(repo)
993 revspostpull = len(repo)
994 lfrevs = opts.get('lfrev', [])
994 lfrevs = opts.get('lfrev', [])
995 if opts.get('all_largefiles'):
995 if opts.get('all_largefiles'):
996 lfrevs.append(b'pulled()')
996 lfrevs.append(b'pulled()')
997 if lfrevs and revspostpull > revsprepull:
997 if lfrevs and revspostpull > revsprepull:
998 numcached = 0
998 numcached = 0
999 repo.firstpulled = revsprepull # for pulled() revset expression
999 repo.firstpulled = revsprepull # for pulled() revset expression
1000 try:
1000 try:
1001 for rev in logcmdutil.revrange(repo, lfrevs):
1001 for rev in logcmdutil.revrange(repo, lfrevs):
1002 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
1002 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
1003 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
1003 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
1004 numcached += len(cached)
1004 numcached += len(cached)
1005 finally:
1005 finally:
1006 del repo.firstpulled
1006 del repo.firstpulled
1007 ui.status(_(b"%d largefiles cached\n") % numcached)
1007 ui.status(_(b"%d largefiles cached\n") % numcached)
1008 return result
1008 return result
1009
1009
1010
1010
1011 @eh.wrapcommand(
1011 @eh.wrapcommand(
1012 b'push',
1012 b'push',
1013 opts=[
1013 opts=[
1014 (
1014 (
1015 b'',
1015 b'',
1016 b'lfrev',
1016 b'lfrev',
1017 [],
1017 [],
1018 _(b'upload largefiles for these revisions'),
1018 _(b'upload largefiles for these revisions'),
1019 _(b'REV'),
1019 _(b'REV'),
1020 )
1020 )
1021 ],
1021 ],
1022 )
1022 )
1023 def overridepush(orig, ui, repo, *args, **kwargs):
1023 def overridepush(orig, ui, repo, *args, **kwargs):
1024 """Override push command and store --lfrev parameters in opargs"""
1024 """Override push command and store --lfrev parameters in opargs"""
1025 lfrevs = kwargs.pop('lfrev', None)
1025 lfrevs = kwargs.pop('lfrev', None)
1026 if lfrevs:
1026 if lfrevs:
1027 opargs = kwargs.setdefault('opargs', {})
1027 opargs = kwargs.setdefault('opargs', {})
1028 opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
1028 opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
1029 return orig(ui, repo, *args, **kwargs)
1029 return orig(ui, repo, *args, **kwargs)
1030
1030
1031
1031
1032 @eh.wrapfunction(exchange, b'pushoperation')
1032 @eh.wrapfunction(exchange, b'pushoperation')
1033 def exchangepushoperation(orig, *args, **kwargs):
1033 def exchangepushoperation(orig, *args, **kwargs):
1034 """Override pushoperation constructor and store lfrevs parameter"""
1034 """Override pushoperation constructor and store lfrevs parameter"""
1035 lfrevs = kwargs.pop('lfrevs', None)
1035 lfrevs = kwargs.pop('lfrevs', None)
1036 pushop = orig(*args, **kwargs)
1036 pushop = orig(*args, **kwargs)
1037 pushop.lfrevs = lfrevs
1037 pushop.lfrevs = lfrevs
1038 return pushop
1038 return pushop
1039
1039
1040
1040
1041 @eh.revsetpredicate(b'pulled()')
1041 @eh.revsetpredicate(b'pulled()')
1042 def pulledrevsetsymbol(repo, subset, x):
1042 def pulledrevsetsymbol(repo, subset, x):
1043 """Changesets that just has been pulled.
1043 """Changesets that just has been pulled.
1044
1044
1045 Only available with largefiles from pull --lfrev expressions.
1045 Only available with largefiles from pull --lfrev expressions.
1046
1046
1047 .. container:: verbose
1047 .. container:: verbose
1048
1048
1049 Some examples:
1049 Some examples:
1050
1050
1051 - pull largefiles for all new changesets::
1051 - pull largefiles for all new changesets::
1052
1052
1053 hg pull -lfrev "pulled()"
1053 hg pull -lfrev "pulled()"
1054
1054
1055 - pull largefiles for all new branch heads::
1055 - pull largefiles for all new branch heads::
1056
1056
1057 hg pull -lfrev "head(pulled()) and not closed()"
1057 hg pull -lfrev "head(pulled()) and not closed()"
1058
1058
1059 """
1059 """
1060
1060
1061 try:
1061 try:
1062 firstpulled = repo.firstpulled
1062 firstpulled = repo.firstpulled
1063 except AttributeError:
1063 except AttributeError:
1064 raise error.Abort(_(b"pulled() only available in --lfrev"))
1064 raise error.Abort(_(b"pulled() only available in --lfrev"))
1065 return smartset.baseset([r for r in subset if r >= firstpulled])
1065 return smartset.baseset([r for r in subset if r >= firstpulled])
1066
1066
1067
1067
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles clones whose destination is not local."""
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % destpath
        )

    return orig(ui, source, dest, **opts)
1090
1090
1091
1091
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone() to optionally prefetch all largefile revisions.

    With --all-largefiles, every largefile revision is downloaded into the
    new local clone; if any are missing, None is returned to signal
    failure.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer.  Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point.  The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)
        if missing != 0:
            return None

    return result
1116
1116
1117
1117
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Run 'hg rebase' with largefiles-aware commit hooks installed.

    Also forces the rebase to happen on disk (not in memory), via a config
    override, and silences largefiles status output for the duration.
    """
    # Repos without largefiles enabled need no special handling.
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    # Push a commit hook and a no-op status writer; both are popped in the
    # finally block so nesting stays balanced even if the rebase aborts.
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1134
1134
1135
1135
@eh.extsetup
def overriderebase(ui):
    """Wrap rebase._dorebase (when the rebase extension is loaded) so the
    ``inmemory`` flag is always forced off."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled; nothing to wrap.
        return

    def _dorebase(orig, *args, **kwargs):
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1149
1149
1150
1150
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run 'hg archive' with largefiles status enabled on the repo."""
    unfiltered = repo.unfiltered()
    with lfstatus(unfiltered):
        return orig(ui, unfiltered, dest, **opts)
1155
1155
1156
1156
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    # Serve hgweb archive downloads with largefiles status enabled so
    # largefile contents (not standins) end up in the archive.
    with lfstatus(web.repo):
        return orig(web)
1161
1161
1162
1162
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive largefile contents in place of their standins.

    Falls back to the original archival.archive() when largefiles status
    is not enabled on either the filtered or the unfiltered repo.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip entries excluded by the caller's matcher.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # `f` is a standin: archive the largefile it points to instead.
            if node is not None:
                # The standin's content is the largefile's hash; resolve it
                # to a path in the local store or system cache.
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # `getdata` is invoked by write() within this same iteration,
            # so binding `path` from the enclosing scope is safe here.
            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive.  That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1258
1258
1259
1259
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive a largefiles-enabled hg subrepo, substituting largefile
    contents for their standins.

    Falls back to the original hgsubrepo.archive() when largefiles is not
    enabled (or not active) for the subrepo.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile
        # name, so the normal matcher works here without the lfutil variants.
        # Test the `name` argument (not the enclosing loop variable) so the
        # helper is self-contained, matching overridearchive's write().
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # `f` is a standin: archive the largefile it points to instead.
            if ctx.node() is not None:
                # The standin's content is the largefile's hash; resolve it
                # to a path in the local store or system cache.
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # Bind `path` as a default argument so the callable does not
            # depend on later values of the loop variable.
            getdata = lambda path=path: util.readfile(
                os.path.join(prefix, path)
            )

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function.  That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1322
1322
1323
1323
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Also abort when any largefile has uncommitted changes."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        status = repo.status()
    if status.modified or status.added or status.removed or status.deleted:
        raise error.Abort(_(b'uncommitted changes'))
1335
1335
1336
1336
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    # Compute the post-commit status with largefiles reported under their
    # own names rather than as standins.
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1341
1341
1342
1342
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Wrap cmdutil.forget() to also forget largefiles.

    Normal files are handled by the original implementation, restricted to
    a matcher that excludes largefiles; the matched largefiles are then
    untracked in the largefiles dirstate and their standins removed.

    Returns the combined (bad, forgot) lists.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only forget files whose standin is actually tracked in the manifest.
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1393
1393
1394
1394
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seen = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        pair = (fn, lfhash)
        if pair in seen:
            return
        seen.add(pair)
        lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if not lfhashes:
        return
    # Query the remote store once for all hashes, then report only the
    # pairs whose hash the remote does not already have.
    lfexists = storefactory.openstore(repo, other).exists(lfhashes)
    for fn, lfhash in seen:
        if not lfexists[lfhash]:
            addfunc(fn, lfhash)
1419
1419
1420
1420
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for 'hg outgoing --large': list largefiles to be uploaded.

    'missing' is the list of outgoing changesets; the b'large' option is
    popped so the wrapped command never sees it, and nothing happens
    unless it was given.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            # Debug mode: remember every hash per filename so they can be
            # printed beneath the file name.
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b' %s\n' % lfhash)

        else:
            # Normal mode: only the set of file names is needed.
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1459
1459
1460
1460
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Add the --large option to 'hg outgoing'."""
    # Nothing to do here other than add the extra help option- the hook above
    # processes it.
    return orig(*args, **kwargs)
1468
1468
1469
1469
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --remote --large'.

    When 'changes' is None this is the query phase: return a
    (pull-check-needed, push-check-needed) pair telling the summary code
    which remote comparisons to perform.  Otherwise print the largefiles
    upload summary based on the outgoing changesets in changes[1].
    """
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1502
1502
1503
1503
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    # Run 'hg summary' with largefiles status enabled so largefiles are
    # reported under their own names instead of their standins.
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1510
1510
1511
1511
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """Wrap scmutil.addremove() to handle largefiles first.

    Missing largefiles are removed and new ones added through the
    largefiles code paths; the original addremove then runs with a
    matcher that ignores largefiles so it only touches normal files.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove.  Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how.  Simply limit the
        # original matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1561
1561
1562
1562
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run 'hg purge' with a status method that hides largefiles.

    Files that look unknown/ignored to the normal status but are tracked
    in the largefiles dirstate are filtered out of the status result so
    purge does not delete them.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Drop entries the largefiles dirstate tracks: they only appear
        # unknown/ignored because the main dirstate tracks their standins.
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        # Restore the original status method even if purge raised, so the
        # monkey patch cannot leak past this command invocation.
        repo.status = oldstatus
1605
1605
1606
1606
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Run ``hg rollback`` and then bring largefile standins in the
    working directory back in sync with the rolled-back dirstate."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins currently tracked (not marked removed); any that are
        # unknown after the rollback are orphans and must be deleted
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    # standin is marked removed: drop its working copy
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin's content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        return result
1638
1638
1639
1639
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Wrap ``hg transplant``: install an automated commit hook and a
    no-op largefiles status writer for the duration of the command."""
    is_resume = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(is_resume))
    # silence largefiles status output while transplanting
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1651
1651
1652
1652
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Reimplementation of ``hg cat`` that understands largefiles.

    Patterns naming a largefile match via its standin, and largefile
    content is streamed from the store/user cache instead of printing
    the standin's hash.  Returns 0 if anything was written, else 1.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # accept the file itself, or — for a standin — the largefile
        # name it stands for
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # suppress "no such file" complaints for largefiles matched via
        # their standin
        if not f in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # always descend into the standin directory, and visit any
        # directory whose largefile counterpart would be visited
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # largefile requested via its standin: fetch into the
                # user cache if missing, then stream the real content
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1723
1723
1724
1724
@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap ``merge._update`` to keep standins and the largefiles
    dirstate consistent across an update/merge, then refresh the
    largefiles that actually changed."""
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        # rehash unsure/modified largefiles and rewrite their standins so
        # the upcoming update sees current content; files whose hash still
        # matches the parent are in fact clean
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.set_possibly_dirty(lfile)
        lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        with lfdirstate.parentchange():
            result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

    return result
1811
1811
1812
1812
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking files as touched, refresh any largefiles whose
    standins were among them."""
    result = orig(repo, files, *args, **kwargs)

    # map each touched standin back to its largefile; non-standins yield None
    candidates = (lfutil.splitstandin(f) for f in files)
    largefiles = [lf for lf in candidates if lf is not None]
    if largefiles:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=largefiles,
            printmessage=False,
            normallookup=True,
        )

    return result
1832
1832
1833
1833
@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Ensure the b'largefiles' requirement survives a repository
    upgrade by adding it to the preserved/supported requirement sets."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1841
1841
1842
1842
_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """Open ``largefile://<id>`` URLs from the largefile store; pass any
    other URL through to the wrapped opener."""
    if not url_.startswith(_lfscheme):
        return orig(ui, url_, data=data, **kwargs)
    if data:
        # a largefile:// URL names content; a request body makes no sense
        msg = b"cannot use data on a 'largefile://' url"
        raise error.ProgrammingError(msg)
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
@@ -1,1299 +1,1298 b''
1 # filemerge.py - file-level merge handling for Mercurial
1 # filemerge.py - file-level merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import re
12 import re
13 import shutil
13 import shutil
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 short,
18 short,
19 )
19 )
20 from .pycompat import (
20 from .pycompat import (
21 getattr,
21 getattr,
22 open,
22 open,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 encoding,
26 encoding,
27 error,
27 error,
28 formatter,
28 formatter,
29 match,
29 match,
30 pycompat,
30 pycompat,
31 registrar,
31 registrar,
32 scmutil,
32 scmutil,
33 simplemerge,
33 simplemerge,
34 tagmerge,
34 tagmerge,
35 templatekw,
35 templatekw,
36 templater,
36 templater,
37 templateutil,
37 templateutil,
38 util,
38 util,
39 )
39 )
40
40
41 from .utils import (
41 from .utils import (
42 procutil,
42 procutil,
43 stringutil,
43 stringutil,
44 )
44 )
45
45
46
46
47 def _toolstr(ui, tool, part, *args):
47 def _toolstr(ui, tool, part, *args):
48 return ui.config(b"merge-tools", tool + b"." + part, *args)
48 return ui.config(b"merge-tools", tool + b"." + part, *args)
49
49
50
50
51 def _toolbool(ui, tool, part, *args):
51 def _toolbool(ui, tool, part, *args):
52 return ui.configbool(b"merge-tools", tool + b"." + part, *args)
52 return ui.configbool(b"merge-tools", tool + b"." + part, *args)
53
53
54
54
55 def _toollist(ui, tool, part):
55 def _toollist(ui, tool, part):
56 return ui.configlist(b"merge-tools", tool + b"." + part)
56 return ui.configlist(b"merge-tools", tool + b"." + part)
57
57
58
58
# Registry of all internal merge tools, keyed by b':name'.
internals = {}
# Merge tools to document.
internalsdoc = {}

# Decorator used below to register internal merge tools.
internaltool = registrar.internalmerge()

# internal tool merge types
nomerge = internaltool.nomerge
mergeonly = internaltool.mergeonly  # just the full merge, no premerge
fullmerge = internaltool.fullmerge  # both premerge and merge
69
# IMPORTANT: keep the last line of this prompt very short ("What do you want to
# do?") because of issue6158, ideally to <40 English characters (to allow other
# languages that may take more columns to still have a chance to fit in an
# 80-column screen).
# Prompt shown for a change/delete conflict where the local side modified
# the file and the other side deleted it.
_localchangedotherdeletedmsg = _(
    b"file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
    b"You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n"
    b"What do you want to do?"
    b"$$ &Changed $$ &Delete $$ &Unresolved"
)

# Prompt for the mirror-image conflict: deleted locally, modified in other.
_otherchangedlocaldeletedmsg = _(
    b"file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
    b"You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n"
    b"What do you want to do?"
    b"$$ &Changed $$ &Deleted $$ &Unresolved"
)
87
87
88
88
class absentfilectx(object):
    """A filectx-like stand-in for a file that is nominally part of a
    context but does not actually exist in it.

    This is here because it's very specific to the filemerge code for now --
    other code is likely going to break with the values this returns.
    """

    def __init__(self, ctx, f):
        self._ctx = ctx
        self._f = f

    def __bytes__(self):
        return b'absent file %s@%s' % (self._f, self._ctx)

    def path(self):
        return self._f

    def size(self):
        return None

    def data(self):
        return None

    def filenode(self):
        return self._ctx.repo().nullid

    _customcmp = True

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # equal only to another absent file at the same path in the same
        # changectx; anything else counts as different
        return (
            not fctx.isabsent()
            or fctx.changectx() != self.changectx()
            or fctx.path() != self.path()
        )

    def flags(self):
        return b''

    def changectx(self):
        return self._ctx

    def isbinary(self):
        return False

    def isabsent(self):
        return True
139
139
140
140
def _findtool(ui, tool):
    """Resolve *tool* to something runnable: the name itself for internal
    tools, a ``python:`` spec verbatim, or an executable path located on
    the system (possibly None)."""
    if tool in internals:
        return tool
    executable = _toolstr(ui, tool, b"executable", tool)
    if executable.startswith(b'python:'):
        return executable
    return findexternaltool(ui, tool)
148
148
149
149
150 def _quotetoolpath(cmd):
150 def _quotetoolpath(cmd):
151 if cmd.startswith(b'python:'):
151 if cmd.startswith(b'python:'):
152 return cmd
152 return cmd
153 return procutil.shellquote(cmd)
153 return procutil.shellquote(cmd)
154
154
155
155
def findexternaltool(ui, tool):
    """Locate the executable for an external merge tool.

    The Windows registry keys configured as ``regkey``/``regkeyalt`` are
    consulted first; failing that, the configured ``executable`` value
    (defaulting to the tool name) is searched for on the path.
    """
    for keyname in (b"regkey", b"regkeyalt"):
        regkey = _toolstr(ui, tool, keyname)
        if not regkey:
            continue
        candidate = util.lookupreg(regkey, _toolstr(ui, tool, b"regname"))
        if not candidate:
            continue
        suffix = _toolstr(ui, tool, b"regappend", b"")
        found = procutil.findexe(candidate + suffix)
        if found:
            return found
    exe = _toolstr(ui, tool, b"executable", tool)
    return procutil.findexe(util.expandpath(exe))
168
168
169
169
def _picktool(repo, ui, path, binary, symlink, changedelete):
    """Pick a merge tool for *path*, returning ``(tool, toolpath)``.

    Precedence: ``ui.forcemerge`` (command line), then the ``HGMERGE``
    environment variable, then matching ``merge-patterns`` entries, then
    configured ``merge-tools`` by priority, and finally an internal merge
    or prompt.  ``toolpath`` is None for internal tools.
    """
    strictcheck = ui.configbool(b'merge', b'strict-capability-check')

    def hascapability(tool, capability, strict=False):
        # internal tools declare capabilities explicitly; external tools
        # advertise them via merge-tools.<tool>.<capability> config
        if tool in internals:
            return strict and internals[tool].capabilities.get(capability)
        return _toolbool(ui, tool, capability)

    def supportscd(tool):
        return tool in internals and internals[tool].mergetype == nomerge

    def check(tool, pat, symlink, binary, changedelete):
        # return True iff *tool* exists and can handle this file's
        # characteristics; warn/note about the reason otherwise
        tmsg = tool
        if pat:
            tmsg = _(b"%s (for pattern %s)") % (tool, pat)
        if not _findtool(ui, tool):
            if pat:  # explicitly requested tool deserves a warning
                ui.warn(_(b"couldn't find merge tool %s\n") % tmsg)
            else:  # configured but non-existing tools are more silent
                ui.note(_(b"couldn't find merge tool %s\n") % tmsg)
        elif symlink and not hascapability(tool, b"symlink", strictcheck):
            ui.warn(_(b"tool %s can't handle symlinks\n") % tmsg)
        elif binary and not hascapability(tool, b"binary", strictcheck):
            ui.warn(_(b"tool %s can't handle binary\n") % tmsg)
        elif changedelete and not supportscd(tool):
            # the nomerge tools are the only tools that support change/delete
            # conflicts
            pass
        elif not procutil.gui() and _toolbool(ui, tool, b"gui"):
            ui.warn(_(b"tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # internal config: ui.forcemerge
    # forcemerge comes from command line arguments, highest priority
    force = ui.config(b'ui', b'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if changedelete and not supportscd(toolpath):
            return b":prompt", None
        else:
            if toolpath:
                return (force, _quotetoolpath(toolpath))
            else:
                # mimic HGMERGE if given tool not found
                return (force, force)

    # HGMERGE takes next precedence
    hgmerge = encoding.environ.get(b"HGMERGE")
    if hgmerge:
        if changedelete and not supportscd(hgmerge):
            return b":prompt", None
        else:
            return (hgmerge, hgmerge)

    # then patterns

    # whether binary capability should be checked strictly
    binarycap = binary and strictcheck

    for pat, tool in ui.configitems(b"merge-patterns"):
        mf = match.match(repo.root, b'', [pat])
        if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
            if binary and not hascapability(tool, b"binary", strict=True):
                ui.warn(
                    _(
                        b"warning: check merge-patterns configurations,"
                        b" if %r for binary file %r is unintentional\n"
                        b"(see 'hg help merge-tools'"
                        b" for binary files capability)\n"
                    )
                    % (pycompat.bytestr(tool), pycompat.bytestr(path))
                )
            toolpath = _findtool(ui, tool)
            return (tool, _quotetoolpath(toolpath))

    # then merge tools
    tools = {}
    disabled = set()
    for k, v in ui.configitems(b"merge-tools"):
        t = k.split(b'.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, b"priority"))
        if _toolbool(ui, t, b"disabled"):
            disabled.add(t)
    names = tools.keys()
    tools = sorted(
        [(-p, tool) for tool, p in tools.items() if tool not in disabled]
    )
    uimerge = ui.config(b"ui", b"merge")
    if uimerge:
        # external tools defined in uimerge won't be able to handle
        # change/delete conflicts
        if check(uimerge, path, symlink, binary, changedelete):
            if uimerge not in names and not changedelete:
                return (uimerge, uimerge)
            tools.insert(0, (None, uimerge))  # highest priority
    tools.append((None, b"hgmerge"))  # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary, changedelete):
            toolpath = _findtool(ui, t)
            return (t, _quotetoolpath(toolpath))

    # internal merge or prompt as last resort
    if symlink or binary or changedelete:
        if not changedelete and len(tools):
            # any tool is rejected by capability for symlink or binary
            ui.warn(_(b"no tool found to merge %s\n") % path)
        return b":prompt", None
    return b":merge", None
281
281
282
282
283 def _eoltype(data):
283 def _eoltype(data):
284 """Guess the EOL type of a file"""
284 """Guess the EOL type of a file"""
285 if b'\0' in data: # binary
285 if b'\0' in data: # binary
286 return None
286 return None
287 if b'\r\n' in data: # Windows
287 if b'\r\n' in data: # Windows
288 return b'\r\n'
288 return b'\r\n'
289 if b'\r' in data: # Old Mac
289 if b'\r' in data: # Old Mac
290 return b'\r'
290 return b'\r'
291 if b'\n' in data: # UNIX
291 if b'\n' in data: # UNIX
292 return b'\n'
292 return b'\n'
293 return None # unknown
293 return None # unknown
294
294
295
295
def _matcheol(file, backup):
    """Convert EOL markers in a file to match origfile"""
    # Guess the EOL convention of the backup (pre-merge) copy.
    tostyle = _eoltype(backup.data())  # No repo.wread filters?
    if not tostyle:
        return
    data = util.readfile(file)
    fromstyle = _eoltype(data)
    if not fromstyle:
        return
    converted = data.replace(fromstyle, tostyle)
    # Only rewrite the file when something actually changed.
    if converted != data:
        util.writefile(file, converted)
306
306
307
307
@internaltool(b'prompt', nomerge)
def _iprompt(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """Asks the user which of the local `p1()` or the other `p2()` version to
    keep as the merged version."""
    ui = repo.ui
    fd = fcd.path()
    uipathfn = scmutil.getuipathfn(repo)

    # Avoid prompting during an in-memory merge since it doesn't support merge
    # conflicts.
    if fcd.changectx().isinmemory():
        raise error.InMemoryMergeConflictsError(
            b'in-memory merge does not support file conflicts'
        )

    prompts = partextras(labels)
    prompts[b'fd'] = uipathfn(fd)
    try:
        if fco.isabsent():
            # change/delete conflict: local changed, other deleted
            index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
            choice = [b'local', b'other', b'unresolved'][index]
        elif fcd.isabsent():
            # delete/change conflict: local deleted, other changed
            index = ui.promptchoice(_otherchangedlocaldeletedmsg % prompts, 2)
            choice = [b'other', b'local', b'unresolved'][index]
        else:
            # IMPORTANT: keep the last line of this prompt ("What do you want to
            # do?") very short, see comment next to _localchangedotherdeletedmsg
            # at the top of the file for details.
            index = ui.promptchoice(
                _(
                    b"file '%(fd)s' needs to be resolved.\n"
                    b"You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave "
                    b"(u)nresolved.\n"
                    b"What do you want to do?"
                    b"$$ &Local $$ &Other $$ &Unresolved"
                )
                % prompts,
                2,
            )
            choice = [b'local', b'other', b'unresolved'][index]

        # Delegate to the matching nomerge tool for the chosen side.
        if choice == b'other':
            return _iother(repo, mynode, fcd, fco, fca, toolconf, labels)
        elif choice == b'local':
            return _ilocal(repo, mynode, fcd, fco, fca, toolconf, labels)
        elif choice == b'unresolved':
            return _ifail(repo, mynode, fcd, fco, fca, toolconf, labels)
    except error.ResponseExpected:
        # The prompt could not be answered (e.g. EOF on stdin): leave the
        # file unresolved.
        ui.write(b"\n")
        return _ifail(repo, mynode, fcd, fco, fca, toolconf, labels)
358
358
359
359
@internaltool(b'local', nomerge)
def _ilocal(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """Uses the local `p1()` version of files as the merged version."""
    # Success (0); the second element reports whether the merged result is a
    # deletion, which is the case exactly when the local side is absent.
    deleted = fcd.isabsent()
    return 0, deleted
364
364
365
365
@internaltool(b'other', nomerge)
def _iother(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """Uses the other `p2()` version of files as the merged version."""
    deleted = fco.isabsent()
    if deleted:
        # local changed, remote deleted -- 'deleted' picked
        _underlyingfctxifabsent(fcd).remove()
    else:
        # Overwrite the local copy with the other side's data and flags.
        _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
    return 0, deleted
377
377
378
378
@internaltool(b'fail', nomerge)
def _ifail(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    # For change/delete conflicts write out the changed version (so the user
    # has something to inspect), then report failure.
    if fcd.isabsent():
        _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
    # 1 = merge failed; False = the file is not deleted by this tool.
    return 1, False
389
389
390
390
def _underlyingfctxifabsent(filectx):
    """Sometimes when resolving, our fcd is actually an absentfilectx, but
    we want to write to it (to do the resolve). This helper returns the
    underyling workingfilectx in that case.
    """
    # Non-absent contexts are already writable; return them unchanged.
    if not filectx.isabsent():
        return filectx
    # Look the path up in the owning changectx to get a real filectx.
    return filectx.changectx()[filectx.path()]
400
400
401
401
def _premerge(repo, fcd, fco, fca, toolconf, backup, labels=None):
    """Try a trivial non-interactive merge before the real merge tool runs.

    Returns 0 if the premerge fully resolved the file, or 1 if the
    configured merge tool still needs to run (restoring `backup` first
    unless a 'keep*' premerge mode asked to leave the markers in place).
    """
    tool, toolpath, binary, symlink, scriptfn = toolconf
    # premerge is line-based: it cannot handle symlinks or change/delete
    # conflicts
    if symlink or fcd.isabsent() or fco.isabsent():
        return 1

    ui = repo.ui

    validkeep = [b'keep', b'keep-merge3', b'keep-mergediff']

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, b"premerge", not binary)
    except error.ConfigError:
        # not a boolean: must be one of the 'keep' styles listed above
        premerge = _toolstr(ui, tool, b"premerge", b"").lower()
        if premerge not in validkeep:
            _valid = b', '.join([b"'" + v + b"'" for v in validkeep])
            raise error.ConfigError(
                _(b"%s.premerge not valid ('%s' is neither boolean nor %s)")
                % (tool, premerge, _valid)
            )

    if premerge:
        mode = b'merge'
        if premerge in {b'keep-merge3', b'keep-mergediff'}:
            if not labels:
                labels = _defaultconflictlabels
            if len(labels) < 3:
                # NOTE(review): this appends to the caller's list (or the
                # shared _defaultconflictlabels default) in place
                labels.append(b'base')
            if premerge == b'keep-mergediff':
                mode = b'mergediff'
        # quiet=True: conflicts here are expected and not worth warning about
        r = simplemerge.simplemerge(
            ui, fcd, fca, fco, quiet=True, label=labels, mode=mode
        )
        if not r:
            ui.debug(b" premerge successful\n")
            return 0
        if premerge not in validkeep:
            # restore from backup and try again
            _restorebackup(fcd, backup)
    return 1  # continue merging
442
442
443
443
def _mergecheck(repo, mynode, fcd, fco, fca, toolconf):
    """Precheck for the internal merge tools: returns False (with a
    warning) for symlinks and change/delete conflicts, which simplemerge
    cannot handle."""
    tool, toolpath, binary, symlink, scriptfn = toolconf
    uipathfn = scmutil.getuipathfn(repo)
    if symlink:
        msg = _(b'warning: internal %s cannot merge symlinks for %s\n')
        repo.ui.warn(msg % (tool, uipathfn(fcd.path())))
        return False
    if fcd.isabsent() or fco.isabsent():
        msg = _(
            b'warning: internal %s cannot merge change/delete '
            b'conflict for %s\n'
        )
        repo.ui.warn(msg % (tool, uipathfn(fcd.path())))
        return False
    return True
463
463
464
464
def _merge(repo, mynode, fcd, fco, fca, toolconf, backup, labels, mode):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge, unless mode equals 'union' which suppresses the markers."""
    ui = repo.ui

    # simplemerge returns 0 on success and non-zero when conflicts remain
    r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
    # (merge attempted, merge return code, file deleted)
    return True, r, False
475
475
476
476
@internaltool(
    b'union',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _iunion(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will use both left and right sides for conflict regions.
    No markers are inserted."""
    # Delegate to the generic simplemerge driver; 'union' mode keeps the
    # content of both sides instead of emitting conflict markers.
    return _merge(
        repo, mynode, fcd, fco, fca, toolconf, backup, labels, mode=b'union'
    )
494
494
495
495
@internaltool(
    b'merge',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _imerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge."""
    # Plain two-way-marker mode of the generic simplemerge driver.
    return _merge(
        repo, mynode, fcd, fco, fca, toolconf, backup, labels, mode=b'merge'
    )
514
514
515
515
@internaltool(
    b'merge3',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _imerge3(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Marker will have three sections, one from each
    side of the merge and one for the base content."""
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        # Copy before extending: appending in place would mutate the caller's
        # list (or, worse, the shared _defaultconflictlabels module default,
        # leaking a stray b'base' entry into later two-label merges).
        labels = labels + [b'base']
    return _imerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels)
536
536
537
537
@internaltool(
    b'merge3-lie-about-conflicts',
    fullmerge,
    b'',
    precheck=_mergecheck,
)
def _imerge3alwaysgood(*args, **kwargs):
    # Like merge3, but record conflicts as resolved with markers in place.
    #
    # This is used for `diff.merge` to show the differences between
    # the auto-merge state and the committed merge state. It may be
    # useful for other things.
    merged, _conflicts, deleted = _imerge3(*args, **kwargs)
    # Drop the conflict status and report False in its place.
    # TODO is this right? I'm not sure what these return values mean,
    # but as far as I can tell this will indicate to callers tha the
    # merge succeeded.
    return merged, False, deleted
555
555
556
556
@internaltool(
    b'mergediff',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _imerge_diff(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. The marker will have two sections, one with the
    content from one side of the merge, and one with a diff from the base
    content to the content on the other side. (experimental)"""
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        # Copy before extending: appending in place would mutate the caller's
        # list (or, worse, the shared _defaultconflictlabels module default,
        # leaking a stray b'base' entry into later two-label merges).
        labels = labels + [b'base']
    return _merge(
        repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'mergediff'
    )
580
580
581
581
def _imergeauto(
    repo,
    mynode,
    fcd,
    fco,
    fca,
    toolconf,
    backup,
    labels=None,
    localorother=None,
):
    """
    Generic driver for _imergelocal and _imergeother
    """
    # Callers must say which side wins conflict regions.
    assert localorother is not None
    status = simplemerge.simplemerge(
        repo.ui, fcd, fca, fco, label=labels, localorother=localorother
    )
    # (merge attempted, simplemerge return code)
    return True, status
601
601
602
602
@internaltool(b'merge-local', mergeonly, precheck=_mergecheck)
def _imergelocal(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the local `p1()` changes."""
    merged, status = _imergeauto(*args, localorother=b'local', **kwargs)
    # Third element: this tool never deletes the file.
    return merged, status, False
610
610
611
611
@internaltool(b'merge-other', mergeonly, precheck=_mergecheck)
def _imergeother(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the other `p2()` changes."""
    merged, status = _imergeauto(*args, localorother=b'other', **kwargs)
    # Third element: this tool never deletes the file.
    return merged, status, False
619
619
620
620
@internaltool(
    b'tagmerge',
    mergeonly,
    _(
        b"automatic tag merging of %s failed! "
        b"(use 'hg resolve --tool :merge' or another merge "
        b"tool of your choice)\n"
    ),
)
def _itagmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal tag merge algorithm (experimental).
    """
    ok, ret = tagmerge.merge(repo, fcd, fco, fca)
    # Third element: tag merging never deletes the file.
    return ok, ret, False
636
636
637
637
@internaltool(b'dump', fullmerge, binary=True, symlink=True)
def _idump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Creates three versions of the files to merge, containing the
    contents of local, other and base. These files can then be used to
    perform a merge manually. If the file to be merged is named
    ``a.txt``, these files will accordingly be named ``a.txt.local``,
    ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
    same directory as ``a.txt``.

    This implies premerge. Therefore, files aren't dumped, if premerge
    runs successfully. Use :forcedump to forcibly write files out.
    """
    from . import context

    # :dump writes to the working directory, which an in-memory merge does
    # not have.
    if isinstance(fcd, context.overlayworkingfilectx):
        raise error.InMemoryMergeConflictsError(
            b'in-memory merge does not support the :dump tool.'
        )

    localpath = _workingpath(repo, fcd)
    relpath = fcd.path()
    # .local gets the decoded working-copy data directly; .other and .base go
    # through repo.wwrite with each side's own flags.
    util.writefile(localpath + b".local", fcd.decodeddata())
    repo.wwrite(relpath + b".other", fco.data(), fco.flags())
    repo.wwrite(relpath + b".base", fca.data(), fca.flags())
    # Report "merge failed" so the file stays unresolved for manual merging.
    return False, 1, False
665
665
666
666
@internaltool(b'forcedump', mergeonly, binary=True, symlink=True)
def _forcedump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Creates three versions of the files as same as :dump, but omits premerge.
    """
    # Same implementation as :dump; only the tool registration (mergeonly,
    # i.e. no premerge) differs.
    return _idump(repo, mynode, fcd, fco, fca, toolconf, backup, labels)
673
673
674
674
def _xmergeimm(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """In-memory stand-in for _xmerge: external merge tools are rejected.

    In-memory merge simply raises an exception on all external merge tools,
    for now.

    It would be possible to run most tools with temporary files, but this
    raises the question of what to do if the user only partially resolves the
    file -- we can't leave a merge state. (Copy to somewhere in the .hg/
    directory and tell the user how to get it is my best idea, but it's
    clunky.)
    """
    raise error.InMemoryMergeConflictsError(
        b'in-memory merge does not support external merge tools'
    )
687
687
688
688
def _describemerge(ui, repo, mynode, fcl, fcb, fco, env, toolpath, args):
    """Show a configurable status message before an external tool runs.

    Renders the 'command-templates.pre-merge-tool-output' template with
    details about the local/base/other sides and the tool invocation;
    does nothing when that config option is unset.
    """
    tmpl = ui.config(b'command-templates', b'pre-merge-tool-output')
    if not tmpl:
        return

    mappingdict = templateutil.mappingdict
    # Template keywords: top-level context plus one sub-mapping per side,
    # each carrying its ctx/fctx, display name, symlink-ness, and the label
    # that was exported to the tool's environment.
    props = {
        b'ctx': fcl.changectx(),
        b'node': hex(mynode),
        b'path': fcl.path(),
        b'local': mappingdict(
            {
                b'ctx': fcl.changectx(),
                b'fctx': fcl,
                b'node': hex(mynode),
                b'name': _(b'local'),
                b'islink': b'l' in fcl.flags(),
                b'label': env[b'HG_MY_LABEL'],
            }
        ),
        b'base': mappingdict(
            {
                b'ctx': fcb.changectx(),
                b'fctx': fcb,
                b'name': _(b'base'),
                b'islink': b'l' in fcb.flags(),
                b'label': env[b'HG_BASE_LABEL'],
            }
        ),
        b'other': mappingdict(
            {
                b'ctx': fco.changectx(),
                b'fctx': fco,
                b'name': _(b'other'),
                b'islink': b'l' in fco.flags(),
                b'label': env[b'HG_OTHER_LABEL'],
            }
        ),
        b'toolpath': toolpath,
        b'toolargs': args,
    }

    # TODO: make all of this something that can be specified on a per-tool basis
    tmpl = templater.unquotestring(tmpl)

    # Not using cmdutil.rendertemplate here since it causes errors importing
    # things for us to import cmdutil.
    tres = formatter.templateresources(ui, repo)
    t = formatter.maketemplater(
        ui, tmpl, defaults=templatekw.keywords, resources=tres
    )
    ui.status(t.renderdefault(props))
741
741
742
742
743 def _xmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels):
743 def _xmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels):
744 tool, toolpath, binary, symlink, scriptfn = toolconf
744 tool, toolpath, binary, symlink, scriptfn = toolconf
745 uipathfn = scmutil.getuipathfn(repo)
745 uipathfn = scmutil.getuipathfn(repo)
746 if fcd.isabsent() or fco.isabsent():
746 if fcd.isabsent() or fco.isabsent():
747 repo.ui.warn(
747 repo.ui.warn(
748 _(b'warning: %s cannot merge change/delete conflict for %s\n')
748 _(b'warning: %s cannot merge change/delete conflict for %s\n')
749 % (tool, uipathfn(fcd.path()))
749 % (tool, uipathfn(fcd.path()))
750 )
750 )
751 return False, 1, None
751 return False, 1, None
752 localpath = _workingpath(repo, fcd)
752 localpath = _workingpath(repo, fcd)
753 args = _toolstr(repo.ui, tool, b"args")
753 args = _toolstr(repo.ui, tool, b"args")
754
754
755 with _maketempfiles(
755 with _maketempfiles(
756 repo, fco, fca, repo.wvfs.join(backup.path()), b"$output" in args
756 repo, fco, fca, repo.wvfs.join(backup.path()), b"$output" in args
757 ) as temppaths:
757 ) as temppaths:
758 basepath, otherpath, localoutputpath = temppaths
758 basepath, otherpath, localoutputpath = temppaths
759 outpath = b""
759 outpath = b""
760 mylabel, otherlabel = labels[:2]
760 mylabel, otherlabel = labels[:2]
761 if len(labels) >= 3:
761 if len(labels) >= 3:
762 baselabel = labels[2]
762 baselabel = labels[2]
763 else:
763 else:
764 baselabel = b'base'
764 baselabel = b'base'
765 env = {
765 env = {
766 b'HG_FILE': fcd.path(),
766 b'HG_FILE': fcd.path(),
767 b'HG_MY_NODE': short(mynode),
767 b'HG_MY_NODE': short(mynode),
768 b'HG_OTHER_NODE': short(fco.changectx().node()),
768 b'HG_OTHER_NODE': short(fco.changectx().node()),
769 b'HG_BASE_NODE': short(fca.changectx().node()),
769 b'HG_BASE_NODE': short(fca.changectx().node()),
770 b'HG_MY_ISLINK': b'l' in fcd.flags(),
770 b'HG_MY_ISLINK': b'l' in fcd.flags(),
771 b'HG_OTHER_ISLINK': b'l' in fco.flags(),
771 b'HG_OTHER_ISLINK': b'l' in fco.flags(),
772 b'HG_BASE_ISLINK': b'l' in fca.flags(),
772 b'HG_BASE_ISLINK': b'l' in fca.flags(),
773 b'HG_MY_LABEL': mylabel,
773 b'HG_MY_LABEL': mylabel,
774 b'HG_OTHER_LABEL': otherlabel,
774 b'HG_OTHER_LABEL': otherlabel,
775 b'HG_BASE_LABEL': baselabel,
775 b'HG_BASE_LABEL': baselabel,
776 }
776 }
777 ui = repo.ui
777 ui = repo.ui
778
778
779 if b"$output" in args:
779 if b"$output" in args:
780 # read input from backup, write to original
780 # read input from backup, write to original
781 outpath = localpath
781 outpath = localpath
782 localpath = localoutputpath
782 localpath = localoutputpath
783 replace = {
783 replace = {
784 b'local': localpath,
784 b'local': localpath,
785 b'base': basepath,
785 b'base': basepath,
786 b'other': otherpath,
786 b'other': otherpath,
787 b'output': outpath,
787 b'output': outpath,
788 b'labellocal': mylabel,
788 b'labellocal': mylabel,
789 b'labelother': otherlabel,
789 b'labelother': otherlabel,
790 b'labelbase': baselabel,
790 b'labelbase': baselabel,
791 }
791 }
792 args = util.interpolate(
792 args = util.interpolate(
793 br'\$',
793 br'\$',
794 replace,
794 replace,
795 args,
795 args,
796 lambda s: procutil.shellquote(util.localpath(s)),
796 lambda s: procutil.shellquote(util.localpath(s)),
797 )
797 )
798 if _toolbool(ui, tool, b"gui"):
798 if _toolbool(ui, tool, b"gui"):
799 repo.ui.status(
799 repo.ui.status(
800 _(b'running merge tool %s for file %s\n')
800 _(b'running merge tool %s for file %s\n')
801 % (tool, uipathfn(fcd.path()))
801 % (tool, uipathfn(fcd.path()))
802 )
802 )
803 if scriptfn is None:
803 if scriptfn is None:
804 cmd = toolpath + b' ' + args
804 cmd = toolpath + b' ' + args
805 repo.ui.debug(b'launching merge tool: %s\n' % cmd)
805 repo.ui.debug(b'launching merge tool: %s\n' % cmd)
806 _describemerge(ui, repo, mynode, fcd, fca, fco, env, toolpath, args)
806 _describemerge(ui, repo, mynode, fcd, fca, fco, env, toolpath, args)
807 r = ui.system(
807 r = ui.system(
808 cmd, cwd=repo.root, environ=env, blockedtag=b'mergetool'
808 cmd, cwd=repo.root, environ=env, blockedtag=b'mergetool'
809 )
809 )
810 else:
810 else:
811 repo.ui.debug(
811 repo.ui.debug(
812 b'launching python merge script: %s:%s\n' % (toolpath, scriptfn)
812 b'launching python merge script: %s:%s\n' % (toolpath, scriptfn)
813 )
813 )
814 r = 0
814 r = 0
815 try:
815 try:
816 # avoid cycle cmdutil->merge->filemerge->extensions->cmdutil
816 # avoid cycle cmdutil->merge->filemerge->extensions->cmdutil
817 from . import extensions
817 from . import extensions
818
818
819 mod = extensions.loadpath(toolpath, b'hgmerge.%s' % tool)
819 mod = extensions.loadpath(toolpath, b'hgmerge.%s' % tool)
820 except Exception:
820 except Exception:
821 raise error.Abort(
821 raise error.Abort(
822 _(b"loading python merge script failed: %s") % toolpath
822 _(b"loading python merge script failed: %s") % toolpath
823 )
823 )
824 mergefn = getattr(mod, scriptfn, None)
824 mergefn = getattr(mod, scriptfn, None)
825 if mergefn is None:
825 if mergefn is None:
826 raise error.Abort(
826 raise error.Abort(
827 _(b"%s does not have function: %s") % (toolpath, scriptfn)
827 _(b"%s does not have function: %s") % (toolpath, scriptfn)
828 )
828 )
829 argslist = procutil.shellsplit(args)
829 argslist = procutil.shellsplit(args)
830 # avoid cycle cmdutil->merge->filemerge->hook->extensions->cmdutil
830 # avoid cycle cmdutil->merge->filemerge->hook->extensions->cmdutil
831 from . import hook
831 from . import hook
832
832
833 ret, raised = hook.pythonhook(
833 ret, raised = hook.pythonhook(
834 ui, repo, b"merge", toolpath, mergefn, {b'args': argslist}, True
834 ui, repo, b"merge", toolpath, mergefn, {b'args': argslist}, True
835 )
835 )
836 if raised:
836 if raised:
837 r = 1
837 r = 1
838 repo.ui.debug(b'merge tool returned: %d\n' % r)
838 repo.ui.debug(b'merge tool returned: %d\n' % r)
839 return True, r, False
839 return True, r, False
840
840
841
841
842 def _formatlabel(ctx, template, label, pad):
842 def _formatlabel(ctx, template, label, pad):
843 """Applies the given template to the ctx, prefixed by the label.
843 """Applies the given template to the ctx, prefixed by the label.
844
844
845 Pad is the minimum width of the label prefix, so that multiple markers
845 Pad is the minimum width of the label prefix, so that multiple markers
846 can have aligned templated parts.
846 can have aligned templated parts.
847 """
847 """
848 if ctx.node() is None:
848 if ctx.node() is None:
849 ctx = ctx.p1()
849 ctx = ctx.p1()
850
850
851 props = {b'ctx': ctx}
851 props = {b'ctx': ctx}
852 templateresult = template.renderdefault(props)
852 templateresult = template.renderdefault(props)
853
853
854 label = (b'%s:' % label).ljust(pad + 1)
854 label = (b'%s:' % label).ljust(pad + 1)
855 mark = b'%s %s' % (label, templateresult)
855 mark = b'%s %s' % (label, templateresult)
856
856
857 if mark:
857 if mark:
858 mark = mark.splitlines()[0] # split for safety
858 mark = mark.splitlines()[0] # split for safety
859
859
860 # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
860 # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
861 return stringutil.ellipsis(mark, 80 - 8)
861 return stringutil.ellipsis(mark, 80 - 8)
862
862
863
863
864 _defaultconflictlabels = [b'local', b'other']
864 _defaultconflictlabels = [b'local', b'other']
865
865
866
866
867 def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
867 def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
868 """Formats the given labels using the conflict marker template.
868 """Formats the given labels using the conflict marker template.
869
869
870 Returns a list of formatted labels.
870 Returns a list of formatted labels.
871 """
871 """
872 cd = fcd.changectx()
872 cd = fcd.changectx()
873 co = fco.changectx()
873 co = fco.changectx()
874 ca = fca.changectx()
874 ca = fca.changectx()
875
875
876 ui = repo.ui
876 ui = repo.ui
877 template = ui.config(b'command-templates', b'mergemarker')
877 template = ui.config(b'command-templates', b'mergemarker')
878 if tool is not None:
878 if tool is not None:
879 template = _toolstr(ui, tool, b'mergemarkertemplate', template)
879 template = _toolstr(ui, tool, b'mergemarkertemplate', template)
880 template = templater.unquotestring(template)
880 template = templater.unquotestring(template)
881 tres = formatter.templateresources(ui, repo)
881 tres = formatter.templateresources(ui, repo)
882 tmpl = formatter.maketemplater(
882 tmpl = formatter.maketemplater(
883 ui, template, defaults=templatekw.keywords, resources=tres
883 ui, template, defaults=templatekw.keywords, resources=tres
884 )
884 )
885
885
886 pad = max(len(l) for l in labels)
886 pad = max(len(l) for l in labels)
887
887
888 newlabels = [
888 newlabels = [
889 _formatlabel(cd, tmpl, labels[0], pad),
889 _formatlabel(cd, tmpl, labels[0], pad),
890 _formatlabel(co, tmpl, labels[1], pad),
890 _formatlabel(co, tmpl, labels[1], pad),
891 ]
891 ]
892 if len(labels) > 2:
892 if len(labels) > 2:
893 newlabels.append(_formatlabel(ca, tmpl, labels[2], pad))
893 newlabels.append(_formatlabel(ca, tmpl, labels[2], pad))
894 return newlabels
894 return newlabels
895
895
896
896
897 def partextras(labels):
897 def partextras(labels):
898 """Return a dictionary of extra labels for use in prompts to the user
898 """Return a dictionary of extra labels for use in prompts to the user
899
899
900 Intended use is in strings of the form "(l)ocal%(l)s".
900 Intended use is in strings of the form "(l)ocal%(l)s".
901 """
901 """
902 if labels is None:
902 if labels is None:
903 return {
903 return {
904 b"l": b"",
904 b"l": b"",
905 b"o": b"",
905 b"o": b"",
906 }
906 }
907
907
908 return {
908 return {
909 b"l": b" [%s]" % labels[0],
909 b"l": b" [%s]" % labels[0],
910 b"o": b" [%s]" % labels[1],
910 b"o": b" [%s]" % labels[1],
911 }
911 }
912
912
913
913
914 def _restorebackup(fcd, backup):
914 def _restorebackup(fcd, backup):
915 # TODO: Add a workingfilectx.write(otherfilectx) path so we can use
915 # TODO: Add a workingfilectx.write(otherfilectx) path so we can use
916 # util.copy here instead.
916 # util.copy here instead.
917 fcd.write(backup.data(), fcd.flags())
917 fcd.write(backup.data(), fcd.flags())
918
918
919
919
920 def _makebackup(repo, ui, wctx, fcd):
920 def _makebackup(repo, ui, wctx, fcd):
921 """Makes and returns a filectx-like object for ``fcd``'s backup file.
921 """Makes and returns a filectx-like object for ``fcd``'s backup file.
922
922
923 In addition to preserving the user's pre-existing modifications to `fcd`
923 In addition to preserving the user's pre-existing modifications to `fcd`
924 (if any), the backup is used to undo certain premerges, confirm whether a
924 (if any), the backup is used to undo certain premerges, confirm whether a
925 merge changed anything, and determine what line endings the new file should
925 merge changed anything, and determine what line endings the new file should
926 have.
926 have.
927
927
928 Backups only need to be written once since their content doesn't change
928 Backups only need to be written once since their content doesn't change
929 afterwards.
929 afterwards.
930 """
930 """
931 if fcd.isabsent():
931 if fcd.isabsent():
932 return None
932 return None
933 # TODO: Break this import cycle somehow. (filectx -> ctx -> fileset ->
933 # TODO: Break this import cycle somehow. (filectx -> ctx -> fileset ->
934 # merge -> filemerge). (I suspect the fileset import is the weakest link)
934 # merge -> filemerge). (I suspect the fileset import is the weakest link)
935 from . import context
935 from . import context
936
936
937 backup = scmutil.backuppath(ui, repo, fcd.path())
937 backup = scmutil.backuppath(ui, repo, fcd.path())
938 inworkingdir = backup.startswith(repo.wvfs.base) and not backup.startswith(
938 inworkingdir = backup.startswith(repo.wvfs.base) and not backup.startswith(
939 repo.vfs.base
939 repo.vfs.base
940 )
940 )
941 if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
941 if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
942 # If the backup file is to be in the working directory, and we're
942 # If the backup file is to be in the working directory, and we're
943 # merging in-memory, we must redirect the backup to the memory context
943 # merging in-memory, we must redirect the backup to the memory context
944 # so we don't disturb the working directory.
944 # so we don't disturb the working directory.
945 relpath = backup[len(repo.wvfs.base) + 1 :]
945 relpath = backup[len(repo.wvfs.base) + 1 :]
946 wctx[relpath].write(fcd.data(), fcd.flags())
946 wctx[relpath].write(fcd.data(), fcd.flags())
947 return wctx[relpath]
947 return wctx[relpath]
948 else:
948 else:
949 # Otherwise, write to wherever path the user specified the backups
949 # Otherwise, write to wherever path the user specified the backups
950 # should go. We still need to switch based on whether the source is
950 # should go. We still need to switch based on whether the source is
951 # in-memory so we can use the fast path of ``util.copy`` if both are
951 # in-memory so we can use the fast path of ``util.copy`` if both are
952 # on disk.
952 # on disk.
953 if isinstance(fcd, context.overlayworkingfilectx):
953 if isinstance(fcd, context.overlayworkingfilectx):
954 util.writefile(backup, fcd.data())
954 util.writefile(backup, fcd.data())
955 else:
955 else:
956 a = _workingpath(repo, fcd)
956 a = _workingpath(repo, fcd)
957 util.copyfile(a, backup)
957 util.copyfile(a, backup)
958 # A arbitraryfilectx is returned, so we can run the same functions on
958 # A arbitraryfilectx is returned, so we can run the same functions on
959 # the backup context regardless of where it lives.
959 # the backup context regardless of where it lives.
960 return context.arbitraryfilectx(backup, repo=repo)
960 return context.arbitraryfilectx(backup, repo=repo)
961
961
962
962
963 @contextlib.contextmanager
963 @contextlib.contextmanager
964 def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
964 def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
965 """Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
965 """Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
966 copies `localpath` to another temporary file, so an external merge tool may
966 copies `localpath` to another temporary file, so an external merge tool may
967 use them.
967 use them.
968 """
968 """
969 tmproot = None
969 tmproot = None
970 tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
970 tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
971 if tmprootprefix:
971 if tmprootprefix:
972 tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
972 tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
973
973
974 def maketempfrompath(prefix, path):
974 def maketempfrompath(prefix, path):
975 fullbase, ext = os.path.splitext(path)
975 fullbase, ext = os.path.splitext(path)
976 pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
976 pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
977 if tmproot:
977 if tmproot:
978 name = os.path.join(tmproot, pre)
978 name = os.path.join(tmproot, pre)
979 if ext:
979 if ext:
980 name += ext
980 name += ext
981 f = open(name, "wb")
981 f = open(name, "wb")
982 else:
982 else:
983 fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
983 fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
984 f = os.fdopen(fd, "wb")
984 f = os.fdopen(fd, "wb")
985 return f, name
985 return f, name
986
986
987 def tempfromcontext(prefix, ctx):
987 def tempfromcontext(prefix, ctx):
988 f, name = maketempfrompath(prefix, ctx.path())
988 f, name = maketempfrompath(prefix, ctx.path())
989 data = ctx.decodeddata()
989 data = ctx.decodeddata()
990 f.write(data)
990 f.write(data)
991 f.close()
991 f.close()
992 return name
992 return name
993
993
994 b = tempfromcontext(b"base", fca)
994 b = tempfromcontext(b"base", fca)
995 c = tempfromcontext(b"other", fco)
995 c = tempfromcontext(b"other", fco)
996 d = localpath
996 d = localpath
997 if uselocalpath:
997 if uselocalpath:
998 # We start off with this being the backup filename, so remove the .orig
998 # We start off with this being the backup filename, so remove the .orig
999 # to make syntax-highlighting more likely.
999 # to make syntax-highlighting more likely.
1000 if d.endswith(b'.orig'):
1000 if d.endswith(b'.orig'):
1001 d, _ = os.path.splitext(d)
1001 d, _ = os.path.splitext(d)
1002 f, d = maketempfrompath(b"local", d)
1002 f, d = maketempfrompath(b"local", d)
1003 with open(localpath, b'rb') as src:
1003 with open(localpath, b'rb') as src:
1004 f.write(src.read())
1004 f.write(src.read())
1005 f.close()
1005 f.close()
1006
1006
1007 try:
1007 try:
1008 yield b, c, d
1008 yield b, c, d
1009 finally:
1009 finally:
1010 if tmproot:
1010 if tmproot:
1011 shutil.rmtree(tmproot)
1011 shutil.rmtree(tmproot)
1012 else:
1012 else:
1013 util.unlink(b)
1013 util.unlink(b)
1014 util.unlink(c)
1014 util.unlink(c)
1015 # if not uselocalpath, d is the 'orig'/backup file which we
1015 # if not uselocalpath, d is the 'orig'/backup file which we
1016 # shouldn't delete.
1016 # shouldn't delete.
1017 if d and uselocalpath:
1017 if d and uselocalpath:
1018 util.unlink(d)
1018 util.unlink(d)
1019
1019
1020
1020
1021 def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
1021 def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
1022 """perform a 3-way merge in the working directory
1022 """perform a 3-way merge in the working directory
1023
1023
1024 mynode = parent node before merge
1024 mynode = parent node before merge
1025 orig = original local filename before merge
1025 orig = original local filename before merge
1026 fco = other file context
1026 fco = other file context
1027 fca = ancestor file context
1027 fca = ancestor file context
1028 fcd = local file context for current/destination file
1028 fcd = local file context for current/destination file
1029
1029
1030 Returns whether the merge is complete, the return value of the merge, and
1030 Returns whether the merge is complete, the return value of the merge, and
1031 a boolean indicating whether the file was deleted from disk."""
1031 a boolean indicating whether the file was deleted from disk."""
1032
1032
1033 if not fco.cmp(fcd): # files identical?
1033 if not fco.cmp(fcd): # files identical?
1034 return True, None, False
1034 return None, False
1035
1035
1036 ui = repo.ui
1036 ui = repo.ui
1037 fd = fcd.path()
1037 fd = fcd.path()
1038 uipathfn = scmutil.getuipathfn(repo)
1038 uipathfn = scmutil.getuipathfn(repo)
1039 fduipath = uipathfn(fd)
1039 fduipath = uipathfn(fd)
1040 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
1040 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
1041 symlink = b'l' in fcd.flags() + fco.flags()
1041 symlink = b'l' in fcd.flags() + fco.flags()
1042 changedelete = fcd.isabsent() or fco.isabsent()
1042 changedelete = fcd.isabsent() or fco.isabsent()
1043 tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
1043 tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
1044 scriptfn = None
1044 scriptfn = None
1045 if tool in internals and tool.startswith(b'internal:'):
1045 if tool in internals and tool.startswith(b'internal:'):
1046 # normalize to new-style names (':merge' etc)
1046 # normalize to new-style names (':merge' etc)
1047 tool = tool[len(b'internal') :]
1047 tool = tool[len(b'internal') :]
1048 if toolpath and toolpath.startswith(b'python:'):
1048 if toolpath and toolpath.startswith(b'python:'):
1049 invalidsyntax = False
1049 invalidsyntax = False
1050 if toolpath.count(b':') >= 2:
1050 if toolpath.count(b':') >= 2:
1051 script, scriptfn = toolpath[7:].rsplit(b':', 1)
1051 script, scriptfn = toolpath[7:].rsplit(b':', 1)
1052 if not scriptfn:
1052 if not scriptfn:
1053 invalidsyntax = True
1053 invalidsyntax = True
1054 # missing :callable can lead to spliting on windows drive letter
1054 # missing :callable can lead to spliting on windows drive letter
1055 if b'\\' in scriptfn or b'/' in scriptfn:
1055 if b'\\' in scriptfn or b'/' in scriptfn:
1056 invalidsyntax = True
1056 invalidsyntax = True
1057 else:
1057 else:
1058 invalidsyntax = True
1058 invalidsyntax = True
1059 if invalidsyntax:
1059 if invalidsyntax:
1060 raise error.Abort(_(b"invalid 'python:' syntax: %s") % toolpath)
1060 raise error.Abort(_(b"invalid 'python:' syntax: %s") % toolpath)
1061 toolpath = script
1061 toolpath = script
1062 ui.debug(
1062 ui.debug(
1063 b"picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
1063 b"picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
1064 % (
1064 % (
1065 tool,
1065 tool,
1066 fduipath,
1066 fduipath,
1067 pycompat.bytestr(binary),
1067 pycompat.bytestr(binary),
1068 pycompat.bytestr(symlink),
1068 pycompat.bytestr(symlink),
1069 pycompat.bytestr(changedelete),
1069 pycompat.bytestr(changedelete),
1070 )
1070 )
1071 )
1071 )
1072
1072
1073 if tool in internals:
1073 if tool in internals:
1074 func = internals[tool]
1074 func = internals[tool]
1075 mergetype = func.mergetype
1075 mergetype = func.mergetype
1076 onfailure = func.onfailure
1076 onfailure = func.onfailure
1077 precheck = func.precheck
1077 precheck = func.precheck
1078 isexternal = False
1078 isexternal = False
1079 else:
1079 else:
1080 if wctx.isinmemory():
1080 if wctx.isinmemory():
1081 func = _xmergeimm
1081 func = _xmergeimm
1082 else:
1082 else:
1083 func = _xmerge
1083 func = _xmerge
1084 mergetype = fullmerge
1084 mergetype = fullmerge
1085 onfailure = _(b"merging %s failed!\n")
1085 onfailure = _(b"merging %s failed!\n")
1086 precheck = None
1086 precheck = None
1087 isexternal = True
1087 isexternal = True
1088
1088
1089 toolconf = tool, toolpath, binary, symlink, scriptfn
1089 toolconf = tool, toolpath, binary, symlink, scriptfn
1090
1090
1091 if mergetype == nomerge:
1091 if mergetype == nomerge:
1092 r, deleted = func(repo, mynode, fcd, fco, fca, toolconf, labels)
1092 return func(repo, mynode, fcd, fco, fca, toolconf, labels)
1093 return True, r, deleted
1094
1093
1095 if orig != fco.path():
1094 if orig != fco.path():
1096 ui.status(
1095 ui.status(
1097 _(b"merging %s and %s to %s\n")
1096 _(b"merging %s and %s to %s\n")
1098 % (uipathfn(orig), uipathfn(fco.path()), fduipath)
1097 % (uipathfn(orig), uipathfn(fco.path()), fduipath)
1099 )
1098 )
1100 else:
1099 else:
1101 ui.status(_(b"merging %s\n") % fduipath)
1100 ui.status(_(b"merging %s\n") % fduipath)
1102
1101
1103 ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
1102 ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
1104
1103
1105 if precheck and not precheck(repo, mynode, fcd, fco, fca, toolconf):
1104 if precheck and not precheck(repo, mynode, fcd, fco, fca, toolconf):
1106 if onfailure:
1105 if onfailure:
1107 if wctx.isinmemory():
1106 if wctx.isinmemory():
1108 raise error.InMemoryMergeConflictsError(
1107 raise error.InMemoryMergeConflictsError(
1109 b'in-memory merge does not support merge conflicts'
1108 b'in-memory merge does not support merge conflicts'
1110 )
1109 )
1111 ui.warn(onfailure % fduipath)
1110 ui.warn(onfailure % fduipath)
1112 return True, 1, False
1111 return 1, False
1113
1112
1114 backup = _makebackup(repo, ui, wctx, fcd)
1113 backup = _makebackup(repo, ui, wctx, fcd)
1115 r = 1
1114 r = 1
1116 try:
1115 try:
1117 internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
1116 internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
1118 if isexternal:
1117 if isexternal:
1119 markerstyle = _toolstr(ui, tool, b'mergemarkers')
1118 markerstyle = _toolstr(ui, tool, b'mergemarkers')
1120 else:
1119 else:
1121 markerstyle = internalmarkerstyle
1120 markerstyle = internalmarkerstyle
1122
1121
1123 if not labels:
1122 if not labels:
1124 labels = _defaultconflictlabels
1123 labels = _defaultconflictlabels
1125 formattedlabels = labels
1124 formattedlabels = labels
1126 if markerstyle != b'basic':
1125 if markerstyle != b'basic':
1127 formattedlabels = _formatlabels(
1126 formattedlabels = _formatlabels(
1128 repo, fcd, fco, fca, labels, tool=tool
1127 repo, fcd, fco, fca, labels, tool=tool
1129 )
1128 )
1130
1129
1131 if mergetype == fullmerge:
1130 if mergetype == fullmerge:
1132 # conflict markers generated by premerge will use 'detailed'
1131 # conflict markers generated by premerge will use 'detailed'
1133 # settings if either ui.mergemarkers or the tool's mergemarkers
1132 # settings if either ui.mergemarkers or the tool's mergemarkers
1134 # setting is 'detailed'. This way tools can have basic labels in
1133 # setting is 'detailed'. This way tools can have basic labels in
1135 # space-constrained areas of the UI, but still get full information
1134 # space-constrained areas of the UI, but still get full information
1136 # in conflict markers if premerge is 'keep' or 'keep-merge3'.
1135 # in conflict markers if premerge is 'keep' or 'keep-merge3'.
1137 premergelabels = labels
1136 premergelabels = labels
1138 labeltool = None
1137 labeltool = None
1139 if markerstyle != b'basic':
1138 if markerstyle != b'basic':
1140 # respect 'tool's mergemarkertemplate (which defaults to
1139 # respect 'tool's mergemarkertemplate (which defaults to
1141 # command-templates.mergemarker)
1140 # command-templates.mergemarker)
1142 labeltool = tool
1141 labeltool = tool
1143 if internalmarkerstyle != b'basic' or markerstyle != b'basic':
1142 if internalmarkerstyle != b'basic' or markerstyle != b'basic':
1144 premergelabels = _formatlabels(
1143 premergelabels = _formatlabels(
1145 repo, fcd, fco, fca, premergelabels, tool=labeltool
1144 repo, fcd, fco, fca, premergelabels, tool=labeltool
1146 )
1145 )
1147
1146
1148 r = _premerge(
1147 r = _premerge(
1149 repo, fcd, fco, fca, toolconf, backup, labels=premergelabels
1148 repo, fcd, fco, fca, toolconf, backup, labels=premergelabels
1150 )
1149 )
1151 # we're done if premerge was successful (r is 0)
1150 # we're done if premerge was successful (r is 0)
1152 if not r:
1151 if not r:
1153 return not r, r, False
1152 return r, False
1154
1153
1155 needcheck, r, deleted = func(
1154 needcheck, r, deleted = func(
1156 repo,
1155 repo,
1157 mynode,
1156 mynode,
1158 fcd,
1157 fcd,
1159 fco,
1158 fco,
1160 fca,
1159 fca,
1161 toolconf,
1160 toolconf,
1162 backup,
1161 backup,
1163 labels=formattedlabels,
1162 labels=formattedlabels,
1164 )
1163 )
1165
1164
1166 if needcheck:
1165 if needcheck:
1167 r = _check(repo, r, ui, tool, fcd, backup)
1166 r = _check(repo, r, ui, tool, fcd, backup)
1168
1167
1169 if r:
1168 if r:
1170 if onfailure:
1169 if onfailure:
1171 if wctx.isinmemory():
1170 if wctx.isinmemory():
1172 raise error.InMemoryMergeConflictsError(
1171 raise error.InMemoryMergeConflictsError(
1173 b'in-memory merge '
1172 b'in-memory merge '
1174 b'does not support '
1173 b'does not support '
1175 b'merge conflicts'
1174 b'merge conflicts'
1176 )
1175 )
1177 ui.warn(onfailure % fduipath)
1176 ui.warn(onfailure % fduipath)
1178 _onfilemergefailure(ui)
1177 _onfilemergefailure(ui)
1179
1178
1180 return True, r, deleted
1179 return r, deleted
1181 finally:
1180 finally:
1182 if not r and backup is not None:
1181 if not r and backup is not None:
1183 backup.remove()
1182 backup.remove()
1184
1183
1185
1184
1186 def _haltmerge():
1185 def _haltmerge():
1187 msg = _(b'merge halted after failed merge (see hg resolve)')
1186 msg = _(b'merge halted after failed merge (see hg resolve)')
1188 raise error.InterventionRequired(msg)
1187 raise error.InterventionRequired(msg)
1189
1188
1190
1189
1191 def _onfilemergefailure(ui):
1190 def _onfilemergefailure(ui):
1192 action = ui.config(b'merge', b'on-failure')
1191 action = ui.config(b'merge', b'on-failure')
1193 if action == b'prompt':
1192 if action == b'prompt':
1194 msg = _(b'continue merge operation (yn)?$$ &Yes $$ &No')
1193 msg = _(b'continue merge operation (yn)?$$ &Yes $$ &No')
1195 if ui.promptchoice(msg, 0) == 1:
1194 if ui.promptchoice(msg, 0) == 1:
1196 _haltmerge()
1195 _haltmerge()
1197 if action == b'halt':
1196 if action == b'halt':
1198 _haltmerge()
1197 _haltmerge()
1199 # default action is 'continue', in which case we neither prompt nor halt
1198 # default action is 'continue', in which case we neither prompt nor halt
1200
1199
1201
1200
1202 def hasconflictmarkers(data):
1201 def hasconflictmarkers(data):
1203 # Detect lines starting with a string of 7 identical characters from the
1202 # Detect lines starting with a string of 7 identical characters from the
1204 # subset Mercurial uses for conflict markers, followed by either the end of
1203 # subset Mercurial uses for conflict markers, followed by either the end of
1205 # line or a space and some text. Note that using [<>=+|-]{7} would detect
1204 # line or a space and some text. Note that using [<>=+|-]{7} would detect
1206 # `<><><><><` as a conflict marker, which we don't want.
1205 # `<><><><><` as a conflict marker, which we don't want.
1207 return bool(
1206 return bool(
1208 re.search(
1207 re.search(
1209 br"^([<>=+|-])\1{6}( .*)$",
1208 br"^([<>=+|-])\1{6}( .*)$",
1210 data,
1209 data,
1211 re.MULTILINE,
1210 re.MULTILINE,
1212 )
1211 )
1213 )
1212 )
1214
1213
1215
1214
1216 def _check(repo, r, ui, tool, fcd, backup):
1215 def _check(repo, r, ui, tool, fcd, backup):
1217 fd = fcd.path()
1216 fd = fcd.path()
1218 uipathfn = scmutil.getuipathfn(repo)
1217 uipathfn = scmutil.getuipathfn(repo)
1219
1218
1220 if not r and (
1219 if not r and (
1221 _toolbool(ui, tool, b"checkconflicts")
1220 _toolbool(ui, tool, b"checkconflicts")
1222 or b'conflicts' in _toollist(ui, tool, b"check")
1221 or b'conflicts' in _toollist(ui, tool, b"check")
1223 ):
1222 ):
1224 if hasconflictmarkers(fcd.data()):
1223 if hasconflictmarkers(fcd.data()):
1225 r = 1
1224 r = 1
1226
1225
1227 checked = False
1226 checked = False
1228 if b'prompt' in _toollist(ui, tool, b"check"):
1227 if b'prompt' in _toollist(ui, tool, b"check"):
1229 checked = True
1228 checked = True
1230 if ui.promptchoice(
1229 if ui.promptchoice(
1231 _(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
1230 _(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
1232 % uipathfn(fd),
1231 % uipathfn(fd),
1233 1,
1232 1,
1234 ):
1233 ):
1235 r = 1
1234 r = 1
1236
1235
1237 if (
1236 if (
1238 not r
1237 not r
1239 and not checked
1238 and not checked
1240 and (
1239 and (
1241 _toolbool(ui, tool, b"checkchanged")
1240 _toolbool(ui, tool, b"checkchanged")
1242 or b'changed' in _toollist(ui, tool, b"check")
1241 or b'changed' in _toollist(ui, tool, b"check")
1243 )
1242 )
1244 ):
1243 ):
1245 if backup is not None and not fcd.cmp(backup):
1244 if backup is not None and not fcd.cmp(backup):
1246 if ui.promptchoice(
1245 if ui.promptchoice(
1247 _(
1246 _(
1248 b" output file %s appears unchanged\n"
1247 b" output file %s appears unchanged\n"
1249 b"was merge successful (yn)?"
1248 b"was merge successful (yn)?"
1250 b"$$ &Yes $$ &No"
1249 b"$$ &Yes $$ &No"
1251 )
1250 )
1252 % uipathfn(fd),
1251 % uipathfn(fd),
1253 1,
1252 1,
1254 ):
1253 ):
1255 r = 1
1254 r = 1
1256
1255
1257 if backup is not None and _toolbool(ui, tool, b"fixeol"):
1256 if backup is not None and _toolbool(ui, tool, b"fixeol"):
1258 _matcheol(_workingpath(repo, fcd), backup)
1257 _matcheol(_workingpath(repo, fcd), backup)
1259
1258
1260 return r
1259 return r
1261
1260
1262
1261
1263 def _workingpath(repo, ctx):
1262 def _workingpath(repo, ctx):
1264 return repo.wjoin(ctx.path())
1263 return repo.wjoin(ctx.path())
1265
1264
1266
1265
1267 def loadinternalmerge(ui, extname, registrarobj):
1266 def loadinternalmerge(ui, extname, registrarobj):
1268 """Load internal merge tool from specified registrarobj"""
1267 """Load internal merge tool from specified registrarobj"""
1269 for name, func in pycompat.iteritems(registrarobj._table):
1268 for name, func in pycompat.iteritems(registrarobj._table):
1270 fullname = b':' + name
1269 fullname = b':' + name
1271 internals[fullname] = func
1270 internals[fullname] = func
1272 internals[b'internal:' + name] = func
1271 internals[b'internal:' + name] = func
1273 internalsdoc[fullname] = func
1272 internalsdoc[fullname] = func
1274
1273
1275 capabilities = sorted([k for k, v in func.capabilities.items() if v])
1274 capabilities = sorted([k for k, v in func.capabilities.items() if v])
1276 if capabilities:
1275 if capabilities:
1277 capdesc = b" (actual capabilities: %s)" % b', '.join(
1276 capdesc = b" (actual capabilities: %s)" % b', '.join(
1278 capabilities
1277 capabilities
1279 )
1278 )
1280 func.__doc__ = func.__doc__ + pycompat.sysstr(b"\n\n%s" % capdesc)
1279 func.__doc__ = func.__doc__ + pycompat.sysstr(b"\n\n%s" % capdesc)
1281
1280
1282 # to put i18n comments into hg.pot for automatically generated texts
1281 # to put i18n comments into hg.pot for automatically generated texts
1283
1282
1284 # i18n: "binary" and "symlink" are keywords
1283 # i18n: "binary" and "symlink" are keywords
1285 # i18n: this text is added automatically
1284 # i18n: this text is added automatically
1286 _(b" (actual capabilities: binary, symlink)")
1285 _(b" (actual capabilities: binary, symlink)")
1287 # i18n: "binary" is keyword
1286 # i18n: "binary" is keyword
1288 # i18n: this text is added automatically
1287 # i18n: this text is added automatically
1289 _(b" (actual capabilities: binary)")
1288 _(b" (actual capabilities: binary)")
1290 # i18n: "symlink" is keyword
1289 # i18n: "symlink" is keyword
1291 # i18n: this text is added automatically
1290 # i18n: this text is added automatically
1292 _(b" (actual capabilities: symlink)")
1291 _(b" (actual capabilities: symlink)")
1293
1292
1294
1293
1295 # load built-in merge tools explicitly to setup internalsdoc
1294 # load built-in merge tools explicitly to setup internalsdoc
1296 loadinternalmerge(None, None, internaltool)
1295 loadinternalmerge(None, None, internaltool)
1297
1296
1298 # tell hggettext to extract docstrings from these functions:
1297 # tell hggettext to extract docstrings from these functions:
1299 i18nfunctions = internals.values()
1298 i18nfunctions = internals.values()
@@ -1,845 +1,844 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import collections
3 import collections
4 import errno
4 import errno
5 import shutil
5 import shutil
6 import struct
6 import struct
7
7
8 from .i18n import _
8 from .i18n import _
9 from .node import (
9 from .node import (
10 bin,
10 bin,
11 hex,
11 hex,
12 nullrev,
12 nullrev,
13 )
13 )
14 from . import (
14 from . import (
15 error,
15 error,
16 filemerge,
16 filemerge,
17 pycompat,
17 pycompat,
18 util,
18 util,
19 )
19 )
20 from .utils import hashutil
20 from .utils import hashutil
21
21
22 _pack = struct.pack
22 _pack = struct.pack
23 _unpack = struct.unpack
23 _unpack = struct.unpack
24
24
25
25
26 def _droponode(data):
26 def _droponode(data):
27 # used for compatibility for v1
27 # used for compatibility for v1
28 bits = data.split(b'\0')
28 bits = data.split(b'\0')
29 bits = bits[:-2] + bits[-1:]
29 bits = bits[:-2] + bits[-1:]
30 return b'\0'.join(bits)
30 return b'\0'.join(bits)
31
31
32
32
33 def _filectxorabsent(hexnode, ctx, f):
33 def _filectxorabsent(hexnode, ctx, f):
34 if hexnode == ctx.repo().nodeconstants.nullhex:
34 if hexnode == ctx.repo().nodeconstants.nullhex:
35 return filemerge.absentfilectx(ctx, f)
35 return filemerge.absentfilectx(ctx, f)
36 else:
36 else:
37 return ctx[f]
37 return ctx[f]
38
38
39
39
40 # Merge state record types. See ``mergestate`` docs for more.
40 # Merge state record types. See ``mergestate`` docs for more.
41
41
42 ####
42 ####
43 # merge records which records metadata about a current merge
43 # merge records which records metadata about a current merge
44 # exists only once in a mergestate
44 # exists only once in a mergestate
45 #####
45 #####
46 RECORD_LOCAL = b'L'
46 RECORD_LOCAL = b'L'
47 RECORD_OTHER = b'O'
47 RECORD_OTHER = b'O'
48 # record merge labels
48 # record merge labels
49 RECORD_LABELS = b'l'
49 RECORD_LABELS = b'l'
50
50
51 #####
51 #####
52 # record extra information about files, with one entry containing info about one
52 # record extra information about files, with one entry containing info about one
53 # file. Hence, multiple of them can exists
53 # file. Hence, multiple of them can exists
54 #####
54 #####
55 RECORD_FILE_VALUES = b'f'
55 RECORD_FILE_VALUES = b'f'
56
56
57 #####
57 #####
58 # merge records which represents state of individual merges of files/folders
58 # merge records which represents state of individual merges of files/folders
59 # These are top level records for each entry containing merge related info.
59 # These are top level records for each entry containing merge related info.
60 # Each record of these has info about one file. Hence multiple of them can
60 # Each record of these has info about one file. Hence multiple of them can
61 # exists
61 # exists
62 #####
62 #####
63 RECORD_MERGED = b'F'
63 RECORD_MERGED = b'F'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 # the path was dir on one side of merge and file on another
65 # the path was dir on one side of merge and file on another
66 RECORD_PATH_CONFLICT = b'P'
66 RECORD_PATH_CONFLICT = b'P'
67
67
68 #####
68 #####
69 # possible state which a merge entry can have. These are stored inside top-level
69 # possible state which a merge entry can have. These are stored inside top-level
70 # merge records mentioned just above.
70 # merge records mentioned just above.
71 #####
71 #####
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 # represents that the file was automatically merged in favor
76 # represents that the file was automatically merged in favor
77 # of other version. This info is used on commit.
77 # of other version. This info is used on commit.
78 # This is now deprecated and commit related information is now
78 # This is now deprecated and commit related information is now
79 # stored in RECORD_FILE_VALUES
79 # stored in RECORD_FILE_VALUES
80 MERGE_RECORD_MERGED_OTHER = b'o'
80 MERGE_RECORD_MERGED_OTHER = b'o'
81
81
82 #####
82 #####
83 # top level record which stores other unknown records. Multiple of these can
83 # top level record which stores other unknown records. Multiple of these can
84 # exists
84 # exists
85 #####
85 #####
86 RECORD_OVERRIDE = b't'
86 RECORD_OVERRIDE = b't'
87
87
88 #####
88 #####
89 # legacy records which are no longer used but kept to prevent breaking BC
89 # legacy records which are no longer used but kept to prevent breaking BC
90 #####
90 #####
91 # This record was release in 5.4 and usage was removed in 5.5
91 # This record was release in 5.4 and usage was removed in 5.5
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 # This record was release in 3.7 and usage was removed in 5.6
93 # This record was release in 3.7 and usage was removed in 5.6
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 # This record was release in 3.7 and usage was removed in 5.6
95 # This record was release in 3.7 and usage was removed in 5.6
96 LEGACY_MERGE_DRIVER_STATE = b'm'
96 LEGACY_MERGE_DRIVER_STATE = b'm'
97 # This record was release in 3.7 and usage was removed in 5.6
97 # This record was release in 3.7 and usage was removed in 5.6
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
99
99
100
100
101 ACTION_FORGET = b'f'
101 ACTION_FORGET = b'f'
102 ACTION_REMOVE = b'r'
102 ACTION_REMOVE = b'r'
103 ACTION_ADD = b'a'
103 ACTION_ADD = b'a'
104 ACTION_GET = b'g'
104 ACTION_GET = b'g'
105 ACTION_PATH_CONFLICT = b'p'
105 ACTION_PATH_CONFLICT = b'p'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 ACTION_ADD_MODIFIED = b'am'
107 ACTION_ADD_MODIFIED = b'am'
108 ACTION_CREATED = b'c'
108 ACTION_CREATED = b'c'
109 ACTION_DELETED_CHANGED = b'dc'
109 ACTION_DELETED_CHANGED = b'dc'
110 ACTION_CHANGED_DELETED = b'cd'
110 ACTION_CHANGED_DELETED = b'cd'
111 ACTION_MERGE = b'm'
111 ACTION_MERGE = b'm'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 ACTION_KEEP = b'k'
114 ACTION_KEEP = b'k'
115 # the file was absent on local side before merge and we should
115 # the file was absent on local side before merge and we should
116 # keep it absent (absent means file not present, it can be a result
116 # keep it absent (absent means file not present, it can be a result
117 # of file deletion, rename etc.)
117 # of file deletion, rename etc.)
118 ACTION_KEEP_ABSENT = b'ka'
118 ACTION_KEEP_ABSENT = b'ka'
119 # the file is absent on the ancestor and remote side of the merge
119 # the file is absent on the ancestor and remote side of the merge
120 # hence this file is new and we should keep it
120 # hence this file is new and we should keep it
121 ACTION_KEEP_NEW = b'kn'
121 ACTION_KEEP_NEW = b'kn'
122 ACTION_EXEC = b'e'
122 ACTION_EXEC = b'e'
123 ACTION_CREATED_MERGE = b'cm'
123 ACTION_CREATED_MERGE = b'cm'
124
124
125 # actions which are no op
125 # actions which are no op
126 NO_OP_ACTIONS = (
126 NO_OP_ACTIONS = (
127 ACTION_KEEP,
127 ACTION_KEEP,
128 ACTION_KEEP_ABSENT,
128 ACTION_KEEP_ABSENT,
129 ACTION_KEEP_NEW,
129 ACTION_KEEP_NEW,
130 )
130 )
131
131
132
132
133 class _mergestate_base(object):
133 class _mergestate_base(object):
134 """track 3-way merge state of individual files
134 """track 3-way merge state of individual files
135
135
136 The merge state is stored on disk when needed. Two files are used: one with
136 The merge state is stored on disk when needed. Two files are used: one with
137 an old format (version 1), and one with a new format (version 2). Version 2
137 an old format (version 1), and one with a new format (version 2). Version 2
138 stores a superset of the data in version 1, including new kinds of records
138 stores a superset of the data in version 1, including new kinds of records
139 in the future. For more about the new format, see the documentation for
139 in the future. For more about the new format, see the documentation for
140 `_readrecordsv2`.
140 `_readrecordsv2`.
141
141
142 Each record can contain arbitrary content, and has an associated type. This
142 Each record can contain arbitrary content, and has an associated type. This
143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
144 versions of Mercurial that don't support it should abort. If `type` is
144 versions of Mercurial that don't support it should abort. If `type` is
145 lowercase, the record can be safely ignored.
145 lowercase, the record can be safely ignored.
146
146
147 Currently known records:
147 Currently known records:
148
148
149 L: the node of the "local" part of the merge (hexified version)
149 L: the node of the "local" part of the merge (hexified version)
150 O: the node of the "other" part of the merge (hexified version)
150 O: the node of the "other" part of the merge (hexified version)
151 F: a file to be merged entry
151 F: a file to be merged entry
152 C: a change/delete or delete/change conflict
152 C: a change/delete or delete/change conflict
153 P: a path conflict (file vs directory)
153 P: a path conflict (file vs directory)
154 f: a (filename, dictionary) tuple of optional values for a given file
154 f: a (filename, dictionary) tuple of optional values for a given file
155 l: the labels for the parts of the merge.
155 l: the labels for the parts of the merge.
156
156
157 Merge record states (stored in self._state, indexed by filename):
157 Merge record states (stored in self._state, indexed by filename):
158 u: unresolved conflict
158 u: unresolved conflict
159 r: resolved conflict
159 r: resolved conflict
160 pu: unresolved path conflict (file conflicts with directory)
160 pu: unresolved path conflict (file conflicts with directory)
161 pr: resolved path conflict
161 pr: resolved path conflict
162 o: file was merged in favor of other parent of merge (DEPRECATED)
162 o: file was merged in favor of other parent of merge (DEPRECATED)
163
163
164 The resolve command transitions between 'u' and 'r' for conflicts and
164 The resolve command transitions between 'u' and 'r' for conflicts and
165 'pu' and 'pr' for path conflicts.
165 'pu' and 'pr' for path conflicts.
166 """
166 """
167
167
168 def __init__(self, repo):
168 def __init__(self, repo):
169 """Initialize the merge state.
169 """Initialize the merge state.
170
170
171 Do not use this directly! Instead call read() or clean()."""
171 Do not use this directly! Instead call read() or clean()."""
172 self._repo = repo
172 self._repo = repo
173 self._state = {}
173 self._state = {}
174 self._stateextras = collections.defaultdict(dict)
174 self._stateextras = collections.defaultdict(dict)
175 self._local = None
175 self._local = None
176 self._other = None
176 self._other = None
177 self._labels = None
177 self._labels = None
178 # contains a mapping of form:
178 # contains a mapping of form:
179 # {filename : (merge_return_value, action_to_be_performed}
179 # {filename : (merge_return_value, action_to_be_performed}
180 # these are results of re-running merge process
180 # these are results of re-running merge process
181 # this dict is used to perform actions on dirstate caused by re-running
181 # this dict is used to perform actions on dirstate caused by re-running
182 # the merge
182 # the merge
183 self._results = {}
183 self._results = {}
184 self._dirty = False
184 self._dirty = False
185
185
186 def reset(self):
186 def reset(self):
187 pass
187 pass
188
188
189 def start(self, node, other, labels=None):
189 def start(self, node, other, labels=None):
190 self._local = node
190 self._local = node
191 self._other = other
191 self._other = other
192 self._labels = labels
192 self._labels = labels
193
193
194 @util.propertycache
194 @util.propertycache
195 def local(self):
195 def local(self):
196 if self._local is None:
196 if self._local is None:
197 msg = b"local accessed but self._local isn't set"
197 msg = b"local accessed but self._local isn't set"
198 raise error.ProgrammingError(msg)
198 raise error.ProgrammingError(msg)
199 return self._local
199 return self._local
200
200
201 @util.propertycache
201 @util.propertycache
202 def localctx(self):
202 def localctx(self):
203 return self._repo[self.local]
203 return self._repo[self.local]
204
204
205 @util.propertycache
205 @util.propertycache
206 def other(self):
206 def other(self):
207 if self._other is None:
207 if self._other is None:
208 msg = b"other accessed but self._other isn't set"
208 msg = b"other accessed but self._other isn't set"
209 raise error.ProgrammingError(msg)
209 raise error.ProgrammingError(msg)
210 return self._other
210 return self._other
211
211
212 @util.propertycache
212 @util.propertycache
213 def otherctx(self):
213 def otherctx(self):
214 return self._repo[self.other]
214 return self._repo[self.other]
215
215
216 def active(self):
216 def active(self):
217 """Whether mergestate is active.
217 """Whether mergestate is active.
218
218
219 Returns True if there appears to be mergestate. This is a rough proxy
219 Returns True if there appears to be mergestate. This is a rough proxy
220 for "is a merge in progress."
220 for "is a merge in progress."
221 """
221 """
222 return bool(self._local) or bool(self._state)
222 return bool(self._local) or bool(self._state)
223
223
224 def commit(self):
224 def commit(self):
225 """Write current state on disk (if necessary)"""
225 """Write current state on disk (if necessary)"""
226
226
227 @staticmethod
227 @staticmethod
228 def getlocalkey(path):
228 def getlocalkey(path):
229 """hash the path of a local file context for storage in the .hg/merge
229 """hash the path of a local file context for storage in the .hg/merge
230 directory."""
230 directory."""
231
231
232 return hex(hashutil.sha1(path).digest())
232 return hex(hashutil.sha1(path).digest())
233
233
234 def _make_backup(self, fctx, localkey):
234 def _make_backup(self, fctx, localkey):
235 raise NotImplementedError()
235 raise NotImplementedError()
236
236
237 def _restore_backup(self, fctx, localkey, flags):
237 def _restore_backup(self, fctx, localkey, flags):
238 raise NotImplementedError()
238 raise NotImplementedError()
239
239
240 def add(self, fcl, fco, fca, fd):
240 def add(self, fcl, fco, fca, fd):
241 """add a new (potentially?) conflicting file the merge state
241 """add a new (potentially?) conflicting file the merge state
242 fcl: file context for local,
242 fcl: file context for local,
243 fco: file context for remote,
243 fco: file context for remote,
244 fca: file context for ancestors,
244 fca: file context for ancestors,
245 fd: file path of the resulting merge.
245 fd: file path of the resulting merge.
246
246
247 note: also write the local version to the `.hg/merge` directory.
247 note: also write the local version to the `.hg/merge` directory.
248 """
248 """
249 if fcl.isabsent():
249 if fcl.isabsent():
250 localkey = self._repo.nodeconstants.nullhex
250 localkey = self._repo.nodeconstants.nullhex
251 else:
251 else:
252 localkey = mergestate.getlocalkey(fcl.path())
252 localkey = mergestate.getlocalkey(fcl.path())
253 self._make_backup(fcl, localkey)
253 self._make_backup(fcl, localkey)
254 self._state[fd] = [
254 self._state[fd] = [
255 MERGE_RECORD_UNRESOLVED,
255 MERGE_RECORD_UNRESOLVED,
256 localkey,
256 localkey,
257 fcl.path(),
257 fcl.path(),
258 fca.path(),
258 fca.path(),
259 hex(fca.filenode()),
259 hex(fca.filenode()),
260 fco.path(),
260 fco.path(),
261 hex(fco.filenode()),
261 hex(fco.filenode()),
262 fcl.flags(),
262 fcl.flags(),
263 ]
263 ]
264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
265 self._dirty = True
265 self._dirty = True
266
266
267 def addpathconflict(self, path, frename, forigin):
267 def addpathconflict(self, path, frename, forigin):
268 """add a new conflicting path to the merge state
268 """add a new conflicting path to the merge state
269 path: the path that conflicts
269 path: the path that conflicts
270 frename: the filename the conflicting file was renamed to
270 frename: the filename the conflicting file was renamed to
271 forigin: origin of the file ('l' or 'r' for local/remote)
271 forigin: origin of the file ('l' or 'r' for local/remote)
272 """
272 """
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
274 self._dirty = True
274 self._dirty = True
275
275
276 def addcommitinfo(self, path, data):
276 def addcommitinfo(self, path, data):
277 """stores information which is required at commit
277 """stores information which is required at commit
278 into _stateextras"""
278 into _stateextras"""
279 self._stateextras[path].update(data)
279 self._stateextras[path].update(data)
280 self._dirty = True
280 self._dirty = True
281
281
282 def __contains__(self, dfile):
282 def __contains__(self, dfile):
283 return dfile in self._state
283 return dfile in self._state
284
284
285 def __getitem__(self, dfile):
285 def __getitem__(self, dfile):
286 return self._state[dfile][0]
286 return self._state[dfile][0]
287
287
288 def __iter__(self):
288 def __iter__(self):
289 return iter(sorted(self._state))
289 return iter(sorted(self._state))
290
290
291 def files(self):
291 def files(self):
292 return self._state.keys()
292 return self._state.keys()
293
293
294 def mark(self, dfile, state):
294 def mark(self, dfile, state):
295 self._state[dfile][0] = state
295 self._state[dfile][0] = state
296 self._dirty = True
296 self._dirty = True
297
297
298 def unresolved(self):
298 def unresolved(self):
299 """Obtain the paths of unresolved files."""
299 """Obtain the paths of unresolved files."""
300
300
301 for f, entry in pycompat.iteritems(self._state):
301 for f, entry in pycompat.iteritems(self._state):
302 if entry[0] in (
302 if entry[0] in (
303 MERGE_RECORD_UNRESOLVED,
303 MERGE_RECORD_UNRESOLVED,
304 MERGE_RECORD_UNRESOLVED_PATH,
304 MERGE_RECORD_UNRESOLVED_PATH,
305 ):
305 ):
306 yield f
306 yield f
307
307
308 def allextras(self):
308 def allextras(self):
309 """return all extras information stored with the mergestate"""
309 """return all extras information stored with the mergestate"""
310 return self._stateextras
310 return self._stateextras
311
311
312 def extras(self, filename):
312 def extras(self, filename):
313 """return extras stored with the mergestate for the given filename"""
313 """return extras stored with the mergestate for the given filename"""
314 return self._stateextras[filename]
314 return self._stateextras[filename]
315
315
316 def resolve(self, dfile, wctx):
316 def resolve(self, dfile, wctx):
317 """run merge process for dfile
317 """run merge process for dfile
318
318
319 Returns the exit code of the merge."""
319 Returns the exit code of the merge."""
320 if self[dfile] in (
320 if self[dfile] in (
321 MERGE_RECORD_RESOLVED,
321 MERGE_RECORD_RESOLVED,
322 LEGACY_RECORD_DRIVER_RESOLVED,
322 LEGACY_RECORD_DRIVER_RESOLVED,
323 ):
323 ):
324 return 0
324 return 0
325 stateentry = self._state[dfile]
325 stateentry = self._state[dfile]
326 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
326 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
327 octx = self._repo[self._other]
327 octx = self._repo[self._other]
328 extras = self.extras(dfile)
328 extras = self.extras(dfile)
329 anccommitnode = extras.get(b'ancestorlinknode')
329 anccommitnode = extras.get(b'ancestorlinknode')
330 if anccommitnode:
330 if anccommitnode:
331 actx = self._repo[anccommitnode]
331 actx = self._repo[anccommitnode]
332 else:
332 else:
333 actx = None
333 actx = None
334 fcd = _filectxorabsent(localkey, wctx, dfile)
334 fcd = _filectxorabsent(localkey, wctx, dfile)
335 fco = _filectxorabsent(onode, octx, ofile)
335 fco = _filectxorabsent(onode, octx, ofile)
336 # TODO: move this to filectxorabsent
336 # TODO: move this to filectxorabsent
337 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
337 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
338 # "premerge" x flags
338 # "premerge" x flags
339 flo = fco.flags()
339 flo = fco.flags()
340 fla = fca.flags()
340 fla = fca.flags()
341 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
341 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
342 if fca.rev() == nullrev and flags != flo:
342 if fca.rev() == nullrev and flags != flo:
343 self._repo.ui.warn(
343 self._repo.ui.warn(
344 _(
344 _(
345 b'warning: cannot merge flags for %s '
345 b'warning: cannot merge flags for %s '
346 b'without common ancestor - keeping local flags\n'
346 b'without common ancestor - keeping local flags\n'
347 )
347 )
348 % afile
348 % afile
349 )
349 )
350 elif flags == fla:
350 elif flags == fla:
351 flags = flo
351 flags = flo
352 # restore local
352 # restore local
353 if localkey != self._repo.nodeconstants.nullhex:
353 if localkey != self._repo.nodeconstants.nullhex:
354 self._restore_backup(wctx[dfile], localkey, flags)
354 self._restore_backup(wctx[dfile], localkey, flags)
355 else:
355 else:
356 wctx[dfile].remove(ignoremissing=True)
356 wctx[dfile].remove(ignoremissing=True)
357 complete, merge_ret, deleted = filemerge.filemerge(
357 merge_ret, deleted = filemerge.filemerge(
358 self._repo,
358 self._repo,
359 wctx,
359 wctx,
360 self._local,
360 self._local,
361 lfile,
361 lfile,
362 fcd,
362 fcd,
363 fco,
363 fco,
364 fca,
364 fca,
365 labels=self._labels,
365 labels=self._labels,
366 )
366 )
367 if merge_ret is None:
367 if merge_ret is None:
368 # If return value of merge is None, then there are no real conflict
368 # If return value of merge is None, then there are no real conflict
369 del self._state[dfile]
369 del self._state[dfile]
370 self._dirty = True
370 self._dirty = True
371 elif not merge_ret:
371 elif not merge_ret:
372 self.mark(dfile, MERGE_RECORD_RESOLVED)
372 self.mark(dfile, MERGE_RECORD_RESOLVED)
373
373
374 if complete:
374 action = None
375 action = None
375 if deleted:
376 if deleted:
376 if fcd.isabsent():
377 if fcd.isabsent():
377 # dc: local picked. Need to drop if present, which may
378 # dc: local picked. Need to drop if present, which may
378 # happen on re-resolves.
379 # happen on re-resolves.
379 action = ACTION_FORGET
380 action = ACTION_FORGET
380 else:
381 # cd: remote picked (or otherwise deleted)
382 action = ACTION_REMOVE
383 else:
384 if fcd.isabsent(): # dc: remote picked
385 action = ACTION_GET
386 elif fco.isabsent(): # cd: local picked
387 if dfile in self.localctx:
388 action = ACTION_ADD_MODIFIED
381 else:
389 else:
382 # cd: remote picked (or otherwise deleted)
390 action = ACTION_ADD
383 action = ACTION_REMOVE
391 # else: regular merges (no action necessary)
384 else:
392 self._results[dfile] = merge_ret, action
385 if fcd.isabsent(): # dc: remote picked
386 action = ACTION_GET
387 elif fco.isabsent(): # cd: local picked
388 if dfile in self.localctx:
389 action = ACTION_ADD_MODIFIED
390 else:
391 action = ACTION_ADD
392 # else: regular merges (no action necessary)
393 self._results[dfile] = merge_ret, action
394
393
395 return merge_ret
394 return merge_ret
396
395
397 def counts(self):
396 def counts(self):
398 """return counts for updated, merged and removed files in this
397 """return counts for updated, merged and removed files in this
399 session"""
398 session"""
400 updated, merged, removed = 0, 0, 0
399 updated, merged, removed = 0, 0, 0
401 for r, action in pycompat.itervalues(self._results):
400 for r, action in pycompat.itervalues(self._results):
402 if r is None:
401 if r is None:
403 updated += 1
402 updated += 1
404 elif r == 0:
403 elif r == 0:
405 if action == ACTION_REMOVE:
404 if action == ACTION_REMOVE:
406 removed += 1
405 removed += 1
407 else:
406 else:
408 merged += 1
407 merged += 1
409 return updated, merged, removed
408 return updated, merged, removed
410
409
411 def unresolvedcount(self):
410 def unresolvedcount(self):
412 """get unresolved count for this merge (persistent)"""
411 """get unresolved count for this merge (persistent)"""
413 return len(list(self.unresolved()))
412 return len(list(self.unresolved()))
414
413
415 def actions(self):
414 def actions(self):
416 """return lists of actions to perform on the dirstate"""
415 """return lists of actions to perform on the dirstate"""
417 actions = {
416 actions = {
418 ACTION_REMOVE: [],
417 ACTION_REMOVE: [],
419 ACTION_FORGET: [],
418 ACTION_FORGET: [],
420 ACTION_ADD: [],
419 ACTION_ADD: [],
421 ACTION_ADD_MODIFIED: [],
420 ACTION_ADD_MODIFIED: [],
422 ACTION_GET: [],
421 ACTION_GET: [],
423 }
422 }
424 for f, (r, action) in pycompat.iteritems(self._results):
423 for f, (r, action) in pycompat.iteritems(self._results):
425 if action is not None:
424 if action is not None:
426 actions[action].append((f, None, b"merge result"))
425 actions[action].append((f, None, b"merge result"))
427 return actions
426 return actions
428
427
429
428
430 class mergestate(_mergestate_base):
429 class mergestate(_mergestate_base):
431
430
432 statepathv1 = b'merge/state'
431 statepathv1 = b'merge/state'
433 statepathv2 = b'merge/state2'
432 statepathv2 = b'merge/state2'
434
433
435 @staticmethod
434 @staticmethod
436 def clean(repo):
435 def clean(repo):
437 """Initialize a brand new merge state, removing any existing state on
436 """Initialize a brand new merge state, removing any existing state on
438 disk."""
437 disk."""
439 ms = mergestate(repo)
438 ms = mergestate(repo)
440 ms.reset()
439 ms.reset()
441 return ms
440 return ms
442
441
443 @staticmethod
442 @staticmethod
444 def read(repo):
443 def read(repo):
445 """Initialize the merge state, reading it from disk."""
444 """Initialize the merge state, reading it from disk."""
446 ms = mergestate(repo)
445 ms = mergestate(repo)
447 ms._read()
446 ms._read()
448 return ms
447 return ms
449
448
450 def _read(self):
449 def _read(self):
451 """Analyse each record content to restore a serialized state from disk
450 """Analyse each record content to restore a serialized state from disk
452
451
453 This function process "record" entry produced by the de-serialization
452 This function process "record" entry produced by the de-serialization
454 of on disk file.
453 of on disk file.
455 """
454 """
456 unsupported = set()
455 unsupported = set()
457 records = self._readrecords()
456 records = self._readrecords()
458 for rtype, record in records:
457 for rtype, record in records:
459 if rtype == RECORD_LOCAL:
458 if rtype == RECORD_LOCAL:
460 self._local = bin(record)
459 self._local = bin(record)
461 elif rtype == RECORD_OTHER:
460 elif rtype == RECORD_OTHER:
462 self._other = bin(record)
461 self._other = bin(record)
463 elif rtype == LEGACY_MERGE_DRIVER_STATE:
462 elif rtype == LEGACY_MERGE_DRIVER_STATE:
464 pass
463 pass
465 elif rtype in (
464 elif rtype in (
466 RECORD_MERGED,
465 RECORD_MERGED,
467 RECORD_CHANGEDELETE_CONFLICT,
466 RECORD_CHANGEDELETE_CONFLICT,
468 RECORD_PATH_CONFLICT,
467 RECORD_PATH_CONFLICT,
469 LEGACY_MERGE_DRIVER_MERGE,
468 LEGACY_MERGE_DRIVER_MERGE,
470 LEGACY_RECORD_RESOLVED_OTHER,
469 LEGACY_RECORD_RESOLVED_OTHER,
471 ):
470 ):
472 bits = record.split(b'\0')
471 bits = record.split(b'\0')
473 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
472 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
474 # and we now store related information in _stateextras, so
473 # and we now store related information in _stateextras, so
475 # lets write to _stateextras directly
474 # lets write to _stateextras directly
476 if bits[1] == MERGE_RECORD_MERGED_OTHER:
475 if bits[1] == MERGE_RECORD_MERGED_OTHER:
477 self._stateextras[bits[0]][b'filenode-source'] = b'other'
476 self._stateextras[bits[0]][b'filenode-source'] = b'other'
478 else:
477 else:
479 self._state[bits[0]] = bits[1:]
478 self._state[bits[0]] = bits[1:]
480 elif rtype == RECORD_FILE_VALUES:
479 elif rtype == RECORD_FILE_VALUES:
481 filename, rawextras = record.split(b'\0', 1)
480 filename, rawextras = record.split(b'\0', 1)
482 extraparts = rawextras.split(b'\0')
481 extraparts = rawextras.split(b'\0')
483 extras = {}
482 extras = {}
484 i = 0
483 i = 0
485 while i < len(extraparts):
484 while i < len(extraparts):
486 extras[extraparts[i]] = extraparts[i + 1]
485 extras[extraparts[i]] = extraparts[i + 1]
487 i += 2
486 i += 2
488
487
489 self._stateextras[filename] = extras
488 self._stateextras[filename] = extras
490 elif rtype == RECORD_LABELS:
489 elif rtype == RECORD_LABELS:
491 labels = record.split(b'\0', 2)
490 labels = record.split(b'\0', 2)
492 self._labels = [l for l in labels if len(l) > 0]
491 self._labels = [l for l in labels if len(l) > 0]
493 elif not rtype.islower():
492 elif not rtype.islower():
494 unsupported.add(rtype)
493 unsupported.add(rtype)
495
494
496 if unsupported:
495 if unsupported:
497 raise error.UnsupportedMergeRecords(unsupported)
496 raise error.UnsupportedMergeRecords(unsupported)
498
497
499 def _readrecords(self):
498 def _readrecords(self):
500 """Read merge state from disk and return a list of record (TYPE, data)
499 """Read merge state from disk and return a list of record (TYPE, data)
501
500
502 We read data from both v1 and v2 files and decide which one to use.
501 We read data from both v1 and v2 files and decide which one to use.
503
502
504 V1 has been used by version prior to 2.9.1 and contains less data than
503 V1 has been used by version prior to 2.9.1 and contains less data than
505 v2. We read both versions and check if no data in v2 contradicts
504 v2. We read both versions and check if no data in v2 contradicts
506 v1. If there is not contradiction we can safely assume that both v1
505 v1. If there is not contradiction we can safely assume that both v1
507 and v2 were written at the same time and use the extract data in v2. If
506 and v2 were written at the same time and use the extract data in v2. If
508 there is contradiction we ignore v2 content as we assume an old version
507 there is contradiction we ignore v2 content as we assume an old version
509 of Mercurial has overwritten the mergestate file and left an old v2
508 of Mercurial has overwritten the mergestate file and left an old v2
510 file around.
509 file around.
511
510
512 returns list of record [(TYPE, data), ...]"""
511 returns list of record [(TYPE, data), ...]"""
513 v1records = self._readrecordsv1()
512 v1records = self._readrecordsv1()
514 v2records = self._readrecordsv2()
513 v2records = self._readrecordsv2()
515 if self._v1v2match(v1records, v2records):
514 if self._v1v2match(v1records, v2records):
516 return v2records
515 return v2records
517 else:
516 else:
518 # v1 file is newer than v2 file, use it
517 # v1 file is newer than v2 file, use it
519 # we have to infer the "other" changeset of the merge
518 # we have to infer the "other" changeset of the merge
520 # we cannot do better than that with v1 of the format
519 # we cannot do better than that with v1 of the format
521 mctx = self._repo[None].parents()[-1]
520 mctx = self._repo[None].parents()[-1]
522 v1records.append((RECORD_OTHER, mctx.hex()))
521 v1records.append((RECORD_OTHER, mctx.hex()))
523 # add place holder "other" file node information
522 # add place holder "other" file node information
524 # nobody is using it yet so we do no need to fetch the data
523 # nobody is using it yet so we do no need to fetch the data
525 # if mctx was wrong `mctx[bits[-2]]` may fails.
524 # if mctx was wrong `mctx[bits[-2]]` may fails.
526 for idx, r in enumerate(v1records):
525 for idx, r in enumerate(v1records):
527 if r[0] == RECORD_MERGED:
526 if r[0] == RECORD_MERGED:
528 bits = r[1].split(b'\0')
527 bits = r[1].split(b'\0')
529 bits.insert(-2, b'')
528 bits.insert(-2, b'')
530 v1records[idx] = (r[0], b'\0'.join(bits))
529 v1records[idx] = (r[0], b'\0'.join(bits))
531 return v1records
530 return v1records
532
531
533 def _v1v2match(self, v1records, v2records):
532 def _v1v2match(self, v1records, v2records):
534 oldv2 = set() # old format version of v2 record
533 oldv2 = set() # old format version of v2 record
535 for rec in v2records:
534 for rec in v2records:
536 if rec[0] == RECORD_LOCAL:
535 if rec[0] == RECORD_LOCAL:
537 oldv2.add(rec)
536 oldv2.add(rec)
538 elif rec[0] == RECORD_MERGED:
537 elif rec[0] == RECORD_MERGED:
539 # drop the onode data (not contained in v1)
538 # drop the onode data (not contained in v1)
540 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
539 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
541 for rec in v1records:
540 for rec in v1records:
542 if rec not in oldv2:
541 if rec not in oldv2:
543 return False
542 return False
544 else:
543 else:
545 return True
544 return True
546
545
547 def _readrecordsv1(self):
546 def _readrecordsv1(self):
548 """read on disk merge state for version 1 file
547 """read on disk merge state for version 1 file
549
548
550 returns list of record [(TYPE, data), ...]
549 returns list of record [(TYPE, data), ...]
551
550
552 Note: the "F" data from this file are one entry short
551 Note: the "F" data from this file are one entry short
553 (no "other file node" entry)
552 (no "other file node" entry)
554 """
553 """
555 records = []
554 records = []
556 try:
555 try:
557 f = self._repo.vfs(self.statepathv1)
556 f = self._repo.vfs(self.statepathv1)
558 for i, l in enumerate(f):
557 for i, l in enumerate(f):
559 if i == 0:
558 if i == 0:
560 records.append((RECORD_LOCAL, l[:-1]))
559 records.append((RECORD_LOCAL, l[:-1]))
561 else:
560 else:
562 records.append((RECORD_MERGED, l[:-1]))
561 records.append((RECORD_MERGED, l[:-1]))
563 f.close()
562 f.close()
564 except IOError as err:
563 except IOError as err:
565 if err.errno != errno.ENOENT:
564 if err.errno != errno.ENOENT:
566 raise
565 raise
567 return records
566 return records
568
567
569 def _readrecordsv2(self):
568 def _readrecordsv2(self):
570 """read on disk merge state for version 2 file
569 """read on disk merge state for version 2 file
571
570
572 This format is a list of arbitrary records of the form:
571 This format is a list of arbitrary records of the form:
573
572
574 [type][length][content]
573 [type][length][content]
575
574
576 `type` is a single character, `length` is a 4 byte integer, and
575 `type` is a single character, `length` is a 4 byte integer, and
577 `content` is an arbitrary byte sequence of length `length`.
576 `content` is an arbitrary byte sequence of length `length`.
578
577
579 Mercurial versions prior to 3.7 have a bug where if there are
578 Mercurial versions prior to 3.7 have a bug where if there are
580 unsupported mandatory merge records, attempting to clear out the merge
579 unsupported mandatory merge records, attempting to clear out the merge
581 state with hg update --clean or similar aborts. The 't' record type
580 state with hg update --clean or similar aborts. The 't' record type
582 works around that by writing out what those versions treat as an
581 works around that by writing out what those versions treat as an
583 advisory record, but later versions interpret as special: the first
582 advisory record, but later versions interpret as special: the first
584 character is the 'real' record type and everything onwards is the data.
583 character is the 'real' record type and everything onwards is the data.
585
584
586 Returns list of records [(TYPE, data), ...]."""
585 Returns list of records [(TYPE, data), ...]."""
587 records = []
586 records = []
588 try:
587 try:
589 f = self._repo.vfs(self.statepathv2)
588 f = self._repo.vfs(self.statepathv2)
590 data = f.read()
589 data = f.read()
591 off = 0
590 off = 0
592 end = len(data)
591 end = len(data)
593 while off < end:
592 while off < end:
594 rtype = data[off : off + 1]
593 rtype = data[off : off + 1]
595 off += 1
594 off += 1
596 length = _unpack(b'>I', data[off : (off + 4)])[0]
595 length = _unpack(b'>I', data[off : (off + 4)])[0]
597 off += 4
596 off += 4
598 record = data[off : (off + length)]
597 record = data[off : (off + length)]
599 off += length
598 off += length
600 if rtype == RECORD_OVERRIDE:
599 if rtype == RECORD_OVERRIDE:
601 rtype, record = record[0:1], record[1:]
600 rtype, record = record[0:1], record[1:]
602 records.append((rtype, record))
601 records.append((rtype, record))
603 f.close()
602 f.close()
604 except IOError as err:
603 except IOError as err:
605 if err.errno != errno.ENOENT:
604 if err.errno != errno.ENOENT:
606 raise
605 raise
607 return records
606 return records
608
607
609 def commit(self):
608 def commit(self):
610 if self._dirty:
609 if self._dirty:
611 records = self._makerecords()
610 records = self._makerecords()
612 self._writerecords(records)
611 self._writerecords(records)
613 self._dirty = False
612 self._dirty = False
614
613
615 def _makerecords(self):
614 def _makerecords(self):
616 records = []
615 records = []
617 records.append((RECORD_LOCAL, hex(self._local)))
616 records.append((RECORD_LOCAL, hex(self._local)))
618 records.append((RECORD_OTHER, hex(self._other)))
617 records.append((RECORD_OTHER, hex(self._other)))
619 # Write out state items. In all cases, the value of the state map entry
618 # Write out state items. In all cases, the value of the state map entry
620 # is written as the contents of the record. The record type depends on
619 # is written as the contents of the record. The record type depends on
621 # the type of state that is stored, and capital-letter records are used
620 # the type of state that is stored, and capital-letter records are used
622 # to prevent older versions of Mercurial that do not support the feature
621 # to prevent older versions of Mercurial that do not support the feature
623 # from loading them.
622 # from loading them.
624 for filename, v in pycompat.iteritems(self._state):
623 for filename, v in pycompat.iteritems(self._state):
625 if v[0] in (
624 if v[0] in (
626 MERGE_RECORD_UNRESOLVED_PATH,
625 MERGE_RECORD_UNRESOLVED_PATH,
627 MERGE_RECORD_RESOLVED_PATH,
626 MERGE_RECORD_RESOLVED_PATH,
628 ):
627 ):
629 # Path conflicts. These are stored in 'P' records. The current
628 # Path conflicts. These are stored in 'P' records. The current
630 # resolution state ('pu' or 'pr') is stored within the record.
629 # resolution state ('pu' or 'pr') is stored within the record.
631 records.append(
630 records.append(
632 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
631 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
633 )
632 )
634 elif (
633 elif (
635 v[1] == self._repo.nodeconstants.nullhex
634 v[1] == self._repo.nodeconstants.nullhex
636 or v[6] == self._repo.nodeconstants.nullhex
635 or v[6] == self._repo.nodeconstants.nullhex
637 ):
636 ):
638 # Change/Delete or Delete/Change conflicts. These are stored in
637 # Change/Delete or Delete/Change conflicts. These are stored in
639 # 'C' records. v[1] is the local file, and is nullhex when the
638 # 'C' records. v[1] is the local file, and is nullhex when the
640 # file is deleted locally ('dc'). v[6] is the remote file, and
639 # file is deleted locally ('dc'). v[6] is the remote file, and
641 # is nullhex when the file is deleted remotely ('cd').
640 # is nullhex when the file is deleted remotely ('cd').
642 records.append(
641 records.append(
643 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
642 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
644 )
643 )
645 else:
644 else:
646 # Normal files. These are stored in 'F' records.
645 # Normal files. These are stored in 'F' records.
647 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
646 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
648 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
647 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
649 rawextras = b'\0'.join(
648 rawextras = b'\0'.join(
650 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
649 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
651 )
650 )
652 records.append(
651 records.append(
653 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
652 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
654 )
653 )
655 if self._labels is not None:
654 if self._labels is not None:
656 labels = b'\0'.join(self._labels)
655 labels = b'\0'.join(self._labels)
657 records.append((RECORD_LABELS, labels))
656 records.append((RECORD_LABELS, labels))
658 return records
657 return records
659
658
660 def _writerecords(self, records):
659 def _writerecords(self, records):
661 """Write current state on disk (both v1 and v2)"""
660 """Write current state on disk (both v1 and v2)"""
662 self._writerecordsv1(records)
661 self._writerecordsv1(records)
663 self._writerecordsv2(records)
662 self._writerecordsv2(records)
664
663
665 def _writerecordsv1(self, records):
664 def _writerecordsv1(self, records):
666 """Write current state on disk in a version 1 file"""
665 """Write current state on disk in a version 1 file"""
667 f = self._repo.vfs(self.statepathv1, b'wb')
666 f = self._repo.vfs(self.statepathv1, b'wb')
668 irecords = iter(records)
667 irecords = iter(records)
669 lrecords = next(irecords)
668 lrecords = next(irecords)
670 assert lrecords[0] == RECORD_LOCAL
669 assert lrecords[0] == RECORD_LOCAL
671 f.write(hex(self._local) + b'\n')
670 f.write(hex(self._local) + b'\n')
672 for rtype, data in irecords:
671 for rtype, data in irecords:
673 if rtype == RECORD_MERGED:
672 if rtype == RECORD_MERGED:
674 f.write(b'%s\n' % _droponode(data))
673 f.write(b'%s\n' % _droponode(data))
675 f.close()
674 f.close()
676
675
677 def _writerecordsv2(self, records):
676 def _writerecordsv2(self, records):
678 """Write current state on disk in a version 2 file
677 """Write current state on disk in a version 2 file
679
678
680 See the docstring for _readrecordsv2 for why we use 't'."""
679 See the docstring for _readrecordsv2 for why we use 't'."""
681 # these are the records that all version 2 clients can read
680 # these are the records that all version 2 clients can read
682 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
681 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
683 f = self._repo.vfs(self.statepathv2, b'wb')
682 f = self._repo.vfs(self.statepathv2, b'wb')
684 for key, data in records:
683 for key, data in records:
685 assert len(key) == 1
684 assert len(key) == 1
686 if key not in allowlist:
685 if key not in allowlist:
687 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
686 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
688 format = b'>sI%is' % len(data)
687 format = b'>sI%is' % len(data)
689 f.write(_pack(format, key, len(data), data))
688 f.write(_pack(format, key, len(data), data))
690 f.close()
689 f.close()
691
690
692 def _make_backup(self, fctx, localkey):
691 def _make_backup(self, fctx, localkey):
693 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
692 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
694
693
695 def _restore_backup(self, fctx, localkey, flags):
694 def _restore_backup(self, fctx, localkey, flags):
696 with self._repo.vfs(b'merge/' + localkey) as f:
695 with self._repo.vfs(b'merge/' + localkey) as f:
697 fctx.write(f.read(), flags)
696 fctx.write(f.read(), flags)
698
697
699 def reset(self):
698 def reset(self):
700 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
699 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
701
700
702
701
703 class memmergestate(_mergestate_base):
702 class memmergestate(_mergestate_base):
704 def __init__(self, repo):
703 def __init__(self, repo):
705 super(memmergestate, self).__init__(repo)
704 super(memmergestate, self).__init__(repo)
706 self._backups = {}
705 self._backups = {}
707
706
708 def _make_backup(self, fctx, localkey):
707 def _make_backup(self, fctx, localkey):
709 self._backups[localkey] = fctx.data()
708 self._backups[localkey] = fctx.data()
710
709
711 def _restore_backup(self, fctx, localkey, flags):
710 def _restore_backup(self, fctx, localkey, flags):
712 fctx.write(self._backups[localkey], flags)
711 fctx.write(self._backups[localkey], flags)
713
712
714
713
715 def recordupdates(repo, actions, branchmerge, getfiledata):
714 def recordupdates(repo, actions, branchmerge, getfiledata):
716 """record merge actions to the dirstate"""
715 """record merge actions to the dirstate"""
717 # remove (must come first)
716 # remove (must come first)
718 for f, args, msg in actions.get(ACTION_REMOVE, []):
717 for f, args, msg in actions.get(ACTION_REMOVE, []):
719 if branchmerge:
718 if branchmerge:
720 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
719 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
721 else:
720 else:
722 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
721 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
723
722
724 # forget (must come first)
723 # forget (must come first)
725 for f, args, msg in actions.get(ACTION_FORGET, []):
724 for f, args, msg in actions.get(ACTION_FORGET, []):
726 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
725 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
727
726
728 # resolve path conflicts
727 # resolve path conflicts
729 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
728 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
730 (f0, origf0) = args
729 (f0, origf0) = args
731 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
730 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
732 repo.dirstate.copy(origf0, f)
731 repo.dirstate.copy(origf0, f)
733 if f0 == origf0:
732 if f0 == origf0:
734 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
733 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
735 else:
734 else:
736 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
735 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
737
736
738 # re-add
737 # re-add
739 for f, args, msg in actions.get(ACTION_ADD, []):
738 for f, args, msg in actions.get(ACTION_ADD, []):
740 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
739 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
741
740
742 # re-add/mark as modified
741 # re-add/mark as modified
743 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
742 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
744 if branchmerge:
743 if branchmerge:
745 repo.dirstate.update_file(
744 repo.dirstate.update_file(
746 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
745 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
747 )
746 )
748 else:
747 else:
749 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
748 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
750
749
751 # exec change
750 # exec change
752 for f, args, msg in actions.get(ACTION_EXEC, []):
751 for f, args, msg in actions.get(ACTION_EXEC, []):
753 repo.dirstate.update_file(
752 repo.dirstate.update_file(
754 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
753 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
755 )
754 )
756
755
757 # keep
756 # keep
758 for f, args, msg in actions.get(ACTION_KEEP, []):
757 for f, args, msg in actions.get(ACTION_KEEP, []):
759 pass
758 pass
760
759
761 # keep deleted
760 # keep deleted
762 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
761 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
763 pass
762 pass
764
763
765 # keep new
764 # keep new
766 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
765 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
767 pass
766 pass
768
767
769 # get
768 # get
770 for f, args, msg in actions.get(ACTION_GET, []):
769 for f, args, msg in actions.get(ACTION_GET, []):
771 if branchmerge:
770 if branchmerge:
772 # tracked in p1 can be True also but update_file should not care
771 # tracked in p1 can be True also but update_file should not care
773 old_entry = repo.dirstate.get_entry(f)
772 old_entry = repo.dirstate.get_entry(f)
774 p1_tracked = old_entry.any_tracked and not old_entry.added
773 p1_tracked = old_entry.any_tracked and not old_entry.added
775 repo.dirstate.update_file(
774 repo.dirstate.update_file(
776 f,
775 f,
777 p1_tracked=p1_tracked,
776 p1_tracked=p1_tracked,
778 wc_tracked=True,
777 wc_tracked=True,
779 p2_info=True,
778 p2_info=True,
780 )
779 )
781 else:
780 else:
782 parentfiledata = getfiledata[f] if getfiledata else None
781 parentfiledata = getfiledata[f] if getfiledata else None
783 repo.dirstate.update_file(
782 repo.dirstate.update_file(
784 f,
783 f,
785 p1_tracked=True,
784 p1_tracked=True,
786 wc_tracked=True,
785 wc_tracked=True,
787 parentfiledata=parentfiledata,
786 parentfiledata=parentfiledata,
788 )
787 )
789
788
790 # merge
789 # merge
791 for f, args, msg in actions.get(ACTION_MERGE, []):
790 for f, args, msg in actions.get(ACTION_MERGE, []):
792 f1, f2, fa, move, anc = args
791 f1, f2, fa, move, anc = args
793 if branchmerge:
792 if branchmerge:
794 # We've done a branch merge, mark this file as merged
793 # We've done a branch merge, mark this file as merged
795 # so that we properly record the merger later
794 # so that we properly record the merger later
796 p1_tracked = f1 == f
795 p1_tracked = f1 == f
797 repo.dirstate.update_file(
796 repo.dirstate.update_file(
798 f,
797 f,
799 p1_tracked=p1_tracked,
798 p1_tracked=p1_tracked,
800 wc_tracked=True,
799 wc_tracked=True,
801 p2_info=True,
800 p2_info=True,
802 )
801 )
803 if f1 != f2: # copy/rename
802 if f1 != f2: # copy/rename
804 if move:
803 if move:
805 repo.dirstate.update_file(
804 repo.dirstate.update_file(
806 f1, p1_tracked=True, wc_tracked=False
805 f1, p1_tracked=True, wc_tracked=False
807 )
806 )
808 if f1 != f:
807 if f1 != f:
809 repo.dirstate.copy(f1, f)
808 repo.dirstate.copy(f1, f)
810 else:
809 else:
811 repo.dirstate.copy(f2, f)
810 repo.dirstate.copy(f2, f)
812 else:
811 else:
813 # We've update-merged a locally modified file, so
812 # We've update-merged a locally modified file, so
814 # we set the dirstate to emulate a normal checkout
813 # we set the dirstate to emulate a normal checkout
815 # of that file some time in the past. Thus our
814 # of that file some time in the past. Thus our
816 # merge will appear as a normal local file
815 # merge will appear as a normal local file
817 # modification.
816 # modification.
818 if f2 == f: # file not locally copied/moved
817 if f2 == f: # file not locally copied/moved
819 repo.dirstate.update_file(
818 repo.dirstate.update_file(
820 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
819 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
821 )
820 )
822 if move:
821 if move:
823 repo.dirstate.update_file(
822 repo.dirstate.update_file(
824 f1, p1_tracked=False, wc_tracked=False
823 f1, p1_tracked=False, wc_tracked=False
825 )
824 )
826
825
827 # directory rename, move local
826 # directory rename, move local
828 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
827 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
829 f0, flag = args
828 f0, flag = args
830 if branchmerge:
829 if branchmerge:
831 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
830 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
832 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
831 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
833 repo.dirstate.copy(f0, f)
832 repo.dirstate.copy(f0, f)
834 else:
833 else:
835 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
834 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
836 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
835 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
837
836
838 # directory rename, get
837 # directory rename, get
839 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
838 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
840 f0, flag = args
839 f0, flag = args
841 if branchmerge:
840 if branchmerge:
842 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
841 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
843 repo.dirstate.copy(f0, f)
842 repo.dirstate.copy(f0, f)
844 else:
843 else:
845 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
844 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
General Comments 0
You need to be logged in to leave comments. Login now