##// END OF EJS Templates
mergeresult: introduce getfile() and use it where required...
Pulkit Goyal -
r45904:4c6004af default
parent child Browse files
Show More
@@ -1,1836 +1,1836 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 upgrade,
40 upgrade,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from . import (
45 from . import (
46 lfcommands,
46 lfcommands,
47 lfutil,
47 lfutil,
48 storefactory,
48 storefactory,
49 )
49 )
50
50
51 eh = exthelper.exthelper()
51 eh = exthelper.exthelper()
52
52
53 lfstatus = lfutil.lfstatus
53 lfstatus = lfutil.lfstatus
54
54
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56
56
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58
58
59
59
60 def composelargefilematcher(match, manifest):
60 def composelargefilematcher(match, manifest):
61 '''create a matcher that matches only the largefiles in the original
61 '''create a matcher that matches only the largefiles in the original
62 matcher'''
62 matcher'''
63 m = copy.copy(match)
63 m = copy.copy(match)
64 lfile = lambda f: lfutil.standin(f) in manifest
64 lfile = lambda f: lfutil.standin(f) in manifest
65 m._files = [lf for lf in m._files if lfile(lf)]
65 m._files = [lf for lf in m._files if lfile(lf)]
66 m._fileset = set(m._files)
66 m._fileset = set(m._files)
67 m.always = lambda: False
67 m.always = lambda: False
68 origmatchfn = m.matchfn
68 origmatchfn = m.matchfn
69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
70 return m
70 return m
71
71
72
72
73 def composenormalfilematcher(match, manifest, exclude=None):
73 def composenormalfilematcher(match, manifest, exclude=None):
74 excluded = set()
74 excluded = set()
75 if exclude is not None:
75 if exclude is not None:
76 excluded.update(exclude)
76 excluded.update(exclude)
77
77
78 m = copy.copy(match)
78 m = copy.copy(match)
79 notlfile = lambda f: not (
79 notlfile = lambda f: not (
80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
81 )
81 )
82 m._files = [lf for lf in m._files if notlfile(lf)]
82 m._files = [lf for lf in m._files if notlfile(lf)]
83 m._fileset = set(m._files)
83 m._fileset = set(m._files)
84 m.always = lambda: False
84 m.always = lambda: False
85 origmatchfn = m.matchfn
85 origmatchfn = m.matchfn
86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
87 return m
87 return m
88
88
89
89
90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
91 large = opts.get('large')
91 large = opts.get('large')
92 lfsize = lfutil.getminsize(
92 lfsize = lfutil.getminsize(
93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
94 )
94 )
95
95
96 lfmatcher = None
96 lfmatcher = None
97 if lfutil.islfilesrepo(repo):
97 if lfutil.islfilesrepo(repo):
98 lfpats = ui.configlist(lfutil.longname, b'patterns')
98 lfpats = ui.configlist(lfutil.longname, b'patterns')
99 if lfpats:
99 if lfpats:
100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
101
101
102 lfnames = []
102 lfnames = []
103 m = matcher
103 m = matcher
104
104
105 wctx = repo[None]
105 wctx = repo[None]
106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
107 exact = m.exact(f)
107 exact = m.exact(f)
108 lfile = lfutil.standin(f) in wctx
108 lfile = lfutil.standin(f) in wctx
109 nfile = f in wctx
109 nfile = f in wctx
110 exists = lfile or nfile
110 exists = lfile or nfile
111
111
112 # Don't warn the user when they attempt to add a normal tracked file.
112 # Don't warn the user when they attempt to add a normal tracked file.
113 # The normal add code will do that for us.
113 # The normal add code will do that for us.
114 if exact and exists:
114 if exact and exists:
115 if lfile:
115 if lfile:
116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
117 continue
117 continue
118
118
119 if (exact or not exists) and not lfutil.isstandin(f):
119 if (exact or not exists) and not lfutil.isstandin(f):
120 # In case the file was removed previously, but not committed
120 # In case the file was removed previously, but not committed
121 # (issue3507)
121 # (issue3507)
122 if not repo.wvfs.exists(f):
122 if not repo.wvfs.exists(f):
123 continue
123 continue
124
124
125 abovemin = (
125 abovemin = (
126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
127 )
127 )
128 if large or abovemin or (lfmatcher and lfmatcher(f)):
128 if large or abovemin or (lfmatcher and lfmatcher(f)):
129 lfnames.append(f)
129 lfnames.append(f)
130 if ui.verbose or not exact:
130 if ui.verbose or not exact:
131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
132
132
133 bad = []
133 bad = []
134
134
135 # Need to lock, otherwise there could be a race condition between
135 # Need to lock, otherwise there could be a race condition between
136 # when standins are created and added to the repo.
136 # when standins are created and added to the repo.
137 with repo.wlock():
137 with repo.wlock():
138 if not opts.get('dry_run'):
138 if not opts.get('dry_run'):
139 standins = []
139 standins = []
140 lfdirstate = lfutil.openlfdirstate(ui, repo)
140 lfdirstate = lfutil.openlfdirstate(ui, repo)
141 for f in lfnames:
141 for f in lfnames:
142 standinname = lfutil.standin(f)
142 standinname = lfutil.standin(f)
143 lfutil.writestandin(
143 lfutil.writestandin(
144 repo,
144 repo,
145 standinname,
145 standinname,
146 hash=b'',
146 hash=b'',
147 executable=lfutil.getexecutable(repo.wjoin(f)),
147 executable=lfutil.getexecutable(repo.wjoin(f)),
148 )
148 )
149 standins.append(standinname)
149 standins.append(standinname)
150 if lfdirstate[f] == b'r':
150 if lfdirstate[f] == b'r':
151 lfdirstate.normallookup(f)
151 lfdirstate.normallookup(f)
152 else:
152 else:
153 lfdirstate.add(f)
153 lfdirstate.add(f)
154 lfdirstate.write()
154 lfdirstate.write()
155 bad += [
155 bad += [
156 lfutil.splitstandin(f)
156 lfutil.splitstandin(f)
157 for f in repo[None].add(standins)
157 for f in repo[None].add(standins)
158 if f in m.files()
158 if f in m.files()
159 ]
159 ]
160
160
161 added = [f for f in lfnames if f not in bad]
161 added = [f for f in lfnames if f not in bad]
162 return added, bad
162 return added, bad
163
163
164
164
165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
166 after = opts.get('after')
166 after = opts.get('after')
167 m = composelargefilematcher(matcher, repo[None].manifest())
167 m = composelargefilematcher(matcher, repo[None].manifest())
168 with lfstatus(repo):
168 with lfstatus(repo):
169 s = repo.status(match=m, clean=not isaddremove)
169 s = repo.status(match=m, clean=not isaddremove)
170 manifest = repo[None].manifest()
170 manifest = repo[None].manifest()
171 modified, added, deleted, clean = [
171 modified, added, deleted, clean = [
172 [f for f in list if lfutil.standin(f) in manifest]
172 [f for f in list if lfutil.standin(f) in manifest]
173 for list in (s.modified, s.added, s.deleted, s.clean)
173 for list in (s.modified, s.added, s.deleted, s.clean)
174 ]
174 ]
175
175
176 def warn(files, msg):
176 def warn(files, msg):
177 for f in files:
177 for f in files:
178 ui.warn(msg % uipathfn(f))
178 ui.warn(msg % uipathfn(f))
179 return int(len(files) > 0)
179 return int(len(files) > 0)
180
180
181 if after:
181 if after:
182 remove = deleted
182 remove = deleted
183 result = warn(
183 result = warn(
184 modified + added + clean, _(b'not removing %s: file still exists\n')
184 modified + added + clean, _(b'not removing %s: file still exists\n')
185 )
185 )
186 else:
186 else:
187 remove = deleted + clean
187 remove = deleted + clean
188 result = warn(
188 result = warn(
189 modified,
189 modified,
190 _(
190 _(
191 b'not removing %s: file is modified (use -f'
191 b'not removing %s: file is modified (use -f'
192 b' to force removal)\n'
192 b' to force removal)\n'
193 ),
193 ),
194 )
194 )
195 result = (
195 result = (
196 warn(
196 warn(
197 added,
197 added,
198 _(
198 _(
199 b'not removing %s: file has been marked for add'
199 b'not removing %s: file has been marked for add'
200 b' (use forget to undo)\n'
200 b' (use forget to undo)\n'
201 ),
201 ),
202 )
202 )
203 or result
203 or result
204 )
204 )
205
205
206 # Need to lock because standin files are deleted then removed from the
206 # Need to lock because standin files are deleted then removed from the
207 # repository and we could race in-between.
207 # repository and we could race in-between.
208 with repo.wlock():
208 with repo.wlock():
209 lfdirstate = lfutil.openlfdirstate(ui, repo)
209 lfdirstate = lfutil.openlfdirstate(ui, repo)
210 for f in sorted(remove):
210 for f in sorted(remove):
211 if ui.verbose or not m.exact(f):
211 if ui.verbose or not m.exact(f):
212 ui.status(_(b'removing %s\n') % uipathfn(f))
212 ui.status(_(b'removing %s\n') % uipathfn(f))
213
213
214 if not dryrun:
214 if not dryrun:
215 if not after:
215 if not after:
216 repo.wvfs.unlinkpath(f, ignoremissing=True)
216 repo.wvfs.unlinkpath(f, ignoremissing=True)
217
217
218 if dryrun:
218 if dryrun:
219 return result
219 return result
220
220
221 remove = [lfutil.standin(f) for f in remove]
221 remove = [lfutil.standin(f) for f in remove]
222 # If this is being called by addremove, let the original addremove
222 # If this is being called by addremove, let the original addremove
223 # function handle this.
223 # function handle this.
224 if not isaddremove:
224 if not isaddremove:
225 for f in remove:
225 for f in remove:
226 repo.wvfs.unlinkpath(f, ignoremissing=True)
226 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 repo[None].forget(remove)
227 repo[None].forget(remove)
228
228
229 for f in remove:
229 for f in remove:
230 lfutil.synclfdirstate(
230 lfutil.synclfdirstate(
231 repo, lfdirstate, lfutil.splitstandin(f), False
231 repo, lfdirstate, lfutil.splitstandin(f), False
232 )
232 )
233
233
234 lfdirstate.write()
234 lfdirstate.write()
235
235
236 return result
236 return result
237
237
238
238
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
240 # appear at their right place in the manifests.
241 @eh.wrapfunction(webcommands, b'decodepath')
241 @eh.wrapfunction(webcommands, b'decodepath')
242 def decodepath(orig, path):
242 def decodepath(orig, path):
243 return lfutil.splitstandin(path) or path
243 return lfutil.splitstandin(path) or path
244
244
245
245
246 # -- Wrappers: modify existing commands --------------------------------
246 # -- Wrappers: modify existing commands --------------------------------
247
247
248
248
249 @eh.wrapcommand(
249 @eh.wrapcommand(
250 b'add',
250 b'add',
251 opts=[
251 opts=[
252 (b'', b'large', None, _(b'add as largefile')),
252 (b'', b'large', None, _(b'add as largefile')),
253 (b'', b'normal', None, _(b'add as normal file')),
253 (b'', b'normal', None, _(b'add as normal file')),
254 (
254 (
255 b'',
255 b'',
256 b'lfsize',
256 b'lfsize',
257 b'',
257 b'',
258 _(
258 _(
259 b'add all files above this size (in megabytes) '
259 b'add all files above this size (in megabytes) '
260 b'as largefiles (default: 10)'
260 b'as largefiles (default: 10)'
261 ),
261 ),
262 ),
262 ),
263 ],
263 ],
264 )
264 )
265 def overrideadd(orig, ui, repo, *pats, **opts):
265 def overrideadd(orig, ui, repo, *pats, **opts):
266 if opts.get('normal') and opts.get('large'):
266 if opts.get('normal') and opts.get('large'):
267 raise error.Abort(_(b'--normal cannot be used with --large'))
267 raise error.Abort(_(b'--normal cannot be used with --large'))
268 return orig(ui, repo, *pats, **opts)
268 return orig(ui, repo, *pats, **opts)
269
269
270
270
271 @eh.wrapfunction(cmdutil, b'add')
271 @eh.wrapfunction(cmdutil, b'add')
272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
273 # The --normal flag short circuits this override
273 # The --normal flag short circuits this override
274 if opts.get('normal'):
274 if opts.get('normal'):
275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
276
276
277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
278 normalmatcher = composenormalfilematcher(
278 normalmatcher = composenormalfilematcher(
279 matcher, repo[None].manifest(), ladded
279 matcher, repo[None].manifest(), ladded
280 )
280 )
281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
282
282
283 bad.extend(f for f in lbad)
283 bad.extend(f for f in lbad)
284 return bad
284 return bad
285
285
286
286
287 @eh.wrapfunction(cmdutil, b'remove')
287 @eh.wrapfunction(cmdutil, b'remove')
288 def cmdutilremove(
288 def cmdutilremove(
289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
290 ):
290 ):
291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
292 result = orig(
292 result = orig(
293 ui,
293 ui,
294 repo,
294 repo,
295 normalmatcher,
295 normalmatcher,
296 prefix,
296 prefix,
297 uipathfn,
297 uipathfn,
298 after,
298 after,
299 force,
299 force,
300 subrepos,
300 subrepos,
301 dryrun,
301 dryrun,
302 )
302 )
303 return (
303 return (
304 removelargefiles(
304 removelargefiles(
305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
306 )
306 )
307 or result
307 or result
308 )
308 )
309
309
310
310
311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
312 def overridestatusfn(orig, repo, rev2, **opts):
312 def overridestatusfn(orig, repo, rev2, **opts):
313 with lfstatus(repo._repo):
313 with lfstatus(repo._repo):
314 return orig(repo, rev2, **opts)
314 return orig(repo, rev2, **opts)
315
315
316
316
317 @eh.wrapcommand(b'status')
317 @eh.wrapcommand(b'status')
318 def overridestatus(orig, ui, repo, *pats, **opts):
318 def overridestatus(orig, ui, repo, *pats, **opts):
319 with lfstatus(repo):
319 with lfstatus(repo):
320 return orig(ui, repo, *pats, **opts)
320 return orig(ui, repo, *pats, **opts)
321
321
322
322
323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
325 with lfstatus(repo._repo):
325 with lfstatus(repo._repo):
326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
327
327
328
328
329 @eh.wrapcommand(b'log')
329 @eh.wrapcommand(b'log')
330 def overridelog(orig, ui, repo, *pats, **opts):
330 def overridelog(orig, ui, repo, *pats, **opts):
331 def overridematchandpats(
331 def overridematchandpats(
332 orig,
332 orig,
333 ctx,
333 ctx,
334 pats=(),
334 pats=(),
335 opts=None,
335 opts=None,
336 globbed=False,
336 globbed=False,
337 default=b'relpath',
337 default=b'relpath',
338 badfn=None,
338 badfn=None,
339 ):
339 ):
340 """Matcher that merges root directory with .hglf, suitable for log.
340 """Matcher that merges root directory with .hglf, suitable for log.
341 It is still possible to match .hglf directly.
341 It is still possible to match .hglf directly.
342 For any listed files run log on the standin too.
342 For any listed files run log on the standin too.
343 matchfn tries both the given filename and with .hglf stripped.
343 matchfn tries both the given filename and with .hglf stripped.
344 """
344 """
345 if opts is None:
345 if opts is None:
346 opts = {}
346 opts = {}
347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
348 m, p = copy.copy(matchandpats)
348 m, p = copy.copy(matchandpats)
349
349
350 if m.always():
350 if m.always():
351 # We want to match everything anyway, so there's no benefit trying
351 # We want to match everything anyway, so there's no benefit trying
352 # to add standins.
352 # to add standins.
353 return matchandpats
353 return matchandpats
354
354
355 pats = set(p)
355 pats = set(p)
356
356
357 def fixpats(pat, tostandin=lfutil.standin):
357 def fixpats(pat, tostandin=lfutil.standin):
358 if pat.startswith(b'set:'):
358 if pat.startswith(b'set:'):
359 return pat
359 return pat
360
360
361 kindpat = matchmod._patsplit(pat, None)
361 kindpat = matchmod._patsplit(pat, None)
362
362
363 if kindpat[0] is not None:
363 if kindpat[0] is not None:
364 return kindpat[0] + b':' + tostandin(kindpat[1])
364 return kindpat[0] + b':' + tostandin(kindpat[1])
365 return tostandin(kindpat[1])
365 return tostandin(kindpat[1])
366
366
367 cwd = repo.getcwd()
367 cwd = repo.getcwd()
368 if cwd:
368 if cwd:
369 hglf = lfutil.shortname
369 hglf = lfutil.shortname
370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
371
371
372 def tostandin(f):
372 def tostandin(f):
373 # The file may already be a standin, so truncate the back
373 # The file may already be a standin, so truncate the back
374 # prefix and test before mangling it. This avoids turning
374 # prefix and test before mangling it. This avoids turning
375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
377 return f
377 return f
378
378
379 # An absolute path is from outside the repo, so truncate the
379 # An absolute path is from outside the repo, so truncate the
380 # path to the root before building the standin. Otherwise cwd
380 # path to the root before building the standin. Otherwise cwd
381 # is somewhere in the repo, relative to root, and needs to be
381 # is somewhere in the repo, relative to root, and needs to be
382 # prepended before building the standin.
382 # prepended before building the standin.
383 if os.path.isabs(cwd):
383 if os.path.isabs(cwd):
384 f = f[len(back) :]
384 f = f[len(back) :]
385 else:
385 else:
386 f = cwd + b'/' + f
386 f = cwd + b'/' + f
387 return back + lfutil.standin(f)
387 return back + lfutil.standin(f)
388
388
389 else:
389 else:
390
390
391 def tostandin(f):
391 def tostandin(f):
392 if lfutil.isstandin(f):
392 if lfutil.isstandin(f):
393 return f
393 return f
394 return lfutil.standin(f)
394 return lfutil.standin(f)
395
395
396 pats.update(fixpats(f, tostandin) for f in p)
396 pats.update(fixpats(f, tostandin) for f in p)
397
397
398 for i in range(0, len(m._files)):
398 for i in range(0, len(m._files)):
399 # Don't add '.hglf' to m.files, since that is already covered by '.'
399 # Don't add '.hglf' to m.files, since that is already covered by '.'
400 if m._files[i] == b'.':
400 if m._files[i] == b'.':
401 continue
401 continue
402 standin = lfutil.standin(m._files[i])
402 standin = lfutil.standin(m._files[i])
403 # If the "standin" is a directory, append instead of replace to
403 # If the "standin" is a directory, append instead of replace to
404 # support naming a directory on the command line with only
404 # support naming a directory on the command line with only
405 # largefiles. The original directory is kept to support normal
405 # largefiles. The original directory is kept to support normal
406 # files.
406 # files.
407 if standin in ctx:
407 if standin in ctx:
408 m._files[i] = standin
408 m._files[i] = standin
409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
410 m._files.append(standin)
410 m._files.append(standin)
411
411
412 m._fileset = set(m._files)
412 m._fileset = set(m._files)
413 m.always = lambda: False
413 m.always = lambda: False
414 origmatchfn = m.matchfn
414 origmatchfn = m.matchfn
415
415
416 def lfmatchfn(f):
416 def lfmatchfn(f):
417 lf = lfutil.splitstandin(f)
417 lf = lfutil.splitstandin(f)
418 if lf is not None and origmatchfn(lf):
418 if lf is not None and origmatchfn(lf):
419 return True
419 return True
420 r = origmatchfn(f)
420 r = origmatchfn(f)
421 return r
421 return r
422
422
423 m.matchfn = lfmatchfn
423 m.matchfn = lfmatchfn
424
424
425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
426 return m, pats
426 return m, pats
427
427
428 # For hg log --patch, the match object is used in two different senses:
428 # For hg log --patch, the match object is used in two different senses:
429 # (1) to determine what revisions should be printed out, and
429 # (1) to determine what revisions should be printed out, and
430 # (2) to determine what files to print out diffs for.
430 # (2) to determine what files to print out diffs for.
431 # The magic matchandpats override should be used for case (1) but not for
431 # The magic matchandpats override should be used for case (1) but not for
432 # case (2).
432 # case (2).
433 oldmatchandpats = scmutil.matchandpats
433 oldmatchandpats = scmutil.matchandpats
434
434
435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
436 wctx = repo[None]
436 wctx = repo[None]
437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
438 return lambda ctx: match
438 return lambda ctx: match
439
439
440 wrappedmatchandpats = extensions.wrappedfunction(
440 wrappedmatchandpats = extensions.wrappedfunction(
441 scmutil, b'matchandpats', overridematchandpats
441 scmutil, b'matchandpats', overridematchandpats
442 )
442 )
443 wrappedmakefilematcher = extensions.wrappedfunction(
443 wrappedmakefilematcher = extensions.wrappedfunction(
444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
445 )
445 )
446 with wrappedmatchandpats, wrappedmakefilematcher:
446 with wrappedmatchandpats, wrappedmakefilematcher:
447 return orig(ui, repo, *pats, **opts)
447 return orig(ui, repo, *pats, **opts)
448
448
449
449
450 @eh.wrapcommand(
450 @eh.wrapcommand(
451 b'verify',
451 b'verify',
452 opts=[
452 opts=[
453 (
453 (
454 b'',
454 b'',
455 b'large',
455 b'large',
456 None,
456 None,
457 _(b'verify that all largefiles in current revision exists'),
457 _(b'verify that all largefiles in current revision exists'),
458 ),
458 ),
459 (
459 (
460 b'',
460 b'',
461 b'lfa',
461 b'lfa',
462 None,
462 None,
463 _(b'verify largefiles in all revisions, not just current'),
463 _(b'verify largefiles in all revisions, not just current'),
464 ),
464 ),
465 (
465 (
466 b'',
466 b'',
467 b'lfc',
467 b'lfc',
468 None,
468 None,
469 _(b'verify local largefile contents, not just existence'),
469 _(b'verify local largefile contents, not just existence'),
470 ),
470 ),
471 ],
471 ],
472 )
472 )
473 def overrideverify(orig, ui, repo, *pats, **opts):
473 def overrideverify(orig, ui, repo, *pats, **opts):
474 large = opts.pop('large', False)
474 large = opts.pop('large', False)
475 all = opts.pop('lfa', False)
475 all = opts.pop('lfa', False)
476 contents = opts.pop('lfc', False)
476 contents = opts.pop('lfc', False)
477
477
478 result = orig(ui, repo, *pats, **opts)
478 result = orig(ui, repo, *pats, **opts)
479 if large or all or contents:
479 if large or all or contents:
480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
481 return result
481 return result
482
482
483
483
484 @eh.wrapcommand(
484 @eh.wrapcommand(
485 b'debugstate',
485 b'debugstate',
486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
487 )
487 )
488 def overridedebugstate(orig, ui, repo, *pats, **opts):
488 def overridedebugstate(orig, ui, repo, *pats, **opts):
489 large = opts.pop('large', False)
489 large = opts.pop('large', False)
490 if large:
490 if large:
491
491
492 class fakerepo(object):
492 class fakerepo(object):
493 dirstate = lfutil.openlfdirstate(ui, repo)
493 dirstate = lfutil.openlfdirstate(ui, repo)
494
494
495 orig(ui, fakerepo, *pats, **opts)
495 orig(ui, fakerepo, *pats, **opts)
496 else:
496 else:
497 orig(ui, repo, *pats, **opts)
497 orig(ui, repo, *pats, **opts)
498
498
499
499
500 # Register the MERGE_ACTION_LARGEFILE_MARK_REMOVED in emptyactions() return type
500 # Register the MERGE_ACTION_LARGEFILE_MARK_REMOVED in emptyactions() return type
501 @eh.wrapfunction(merge, b'emptyactions')
501 @eh.wrapfunction(merge, b'emptyactions')
502 def overrideemptyactions(origfn):
502 def overrideemptyactions(origfn):
503 ret = origfn()
503 ret = origfn()
504 ret[MERGE_ACTION_LARGEFILE_MARK_REMOVED] = []
504 ret[MERGE_ACTION_LARGEFILE_MARK_REMOVED] = []
505 return ret
505 return ret
506
506
507
507
508 # Before starting the manifest merge, merge.updates will call
508 # Before starting the manifest merge, merge.updates will call
509 # _checkunknownfile to check if there are any files in the merged-in
509 # _checkunknownfile to check if there are any files in the merged-in
510 # changeset that collide with unknown files in the working copy.
510 # changeset that collide with unknown files in the working copy.
511 #
511 #
512 # The largefiles are seen as unknown, so this prevents us from merging
512 # The largefiles are seen as unknown, so this prevents us from merging
513 # in a file 'foo' if we already have a largefile with the same name.
513 # in a file 'foo' if we already have a largefile with the same name.
514 #
514 #
515 # The overridden function filters the unknown files by removing any
515 # The overridden function filters the unknown files by removing any
516 # largefiles. This makes the merge proceed and we can then handle this
516 # largefiles. This makes the merge proceed and we can then handle this
517 # case further in the overridden calculateupdates function below.
517 # case further in the overridden calculateupdates function below.
518 @eh.wrapfunction(merge, b'_checkunknownfile')
518 @eh.wrapfunction(merge, b'_checkunknownfile')
519 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
519 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
520 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
520 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
521 return False
521 return False
522 return origfn(repo, wctx, mctx, f, f2)
522 return origfn(repo, wctx, mctx, f, f2)
523
523
524
524
525 # The manifest merge handles conflicts on the manifest level. We want
525 # The manifest merge handles conflicts on the manifest level. We want
526 # to handle changes in largefile-ness of files at this level too.
526 # to handle changes in largefile-ness of files at this level too.
527 #
527 #
528 # The strategy is to run the original calculateupdates and then process
528 # The strategy is to run the original calculateupdates and then process
529 # the action list it outputs. There are two cases we need to deal with:
529 # the action list it outputs. There are two cases we need to deal with:
530 #
530 #
531 # 1. Normal file in p1, largefile in p2. Here the largefile is
531 # 1. Normal file in p1, largefile in p2. Here the largefile is
532 # detected via its standin file, which will enter the working copy
532 # detected via its standin file, which will enter the working copy
533 # with a "get" action. It is not "merge" since the standin is all
533 # with a "get" action. It is not "merge" since the standin is all
534 # Mercurial is concerned with at this level -- the link to the
534 # Mercurial is concerned with at this level -- the link to the
535 # existing normal file is not relevant here.
535 # existing normal file is not relevant here.
536 #
536 #
537 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
537 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
538 # since the largefile will be present in the working copy and
538 # since the largefile will be present in the working copy and
539 # different from the normal file in p2. Mercurial therefore
539 # different from the normal file in p2. Mercurial therefore
540 # triggers a merge action.
540 # triggers a merge action.
541 #
541 #
542 # In both cases, we prompt the user and emit new actions to either
542 # In both cases, we prompt the user and emit new actions to either
543 # remove the standin (if the normal file was kept) or to remove the
543 # remove the standin (if the normal file was kept) or to remove the
544 # normal file and get the standin (if the largefile was kept). The
544 # normal file and get the standin (if the largefile was kept). The
545 # default prompt answer is to use the largefile version since it was
545 # default prompt answer is to use the largefile version since it was
546 # presumably changed on purpose.
546 # presumably changed on purpose.
547 #
547 #
548 # Finally, the merge.applyupdates function will then take care of
548 # Finally, the merge.applyupdates function will then take care of
549 # writing the files into the working copy and lfcommands.updatelfiles
549 # writing the files into the working copy and lfcommands.updatelfiles
550 # will update the largefiles.
550 # will update the largefiles.
551 @eh.wrapfunction(merge, b'calculateupdates')
551 @eh.wrapfunction(merge, b'calculateupdates')
552 def overridecalculateupdates(
552 def overridecalculateupdates(
553 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
553 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
554 ):
554 ):
555 overwrite = force and not branchmerge
555 overwrite = force and not branchmerge
556 mresult = origfn(
556 mresult = origfn(
557 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
557 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
558 )
558 )
559
559
560 if overwrite:
560 if overwrite:
561 return mresult
561 return mresult
562
562
563 # Convert to dictionary with filename as key and action as value.
563 # Convert to dictionary with filename as key and action as value.
564 lfiles = set()
564 lfiles = set()
565 for f in mresult.actions:
565 for f in mresult.actions:
566 splitstandin = lfutil.splitstandin(f)
566 splitstandin = lfutil.splitstandin(f)
567 if splitstandin is not None and splitstandin in p1:
567 if splitstandin is not None and splitstandin in p1:
568 lfiles.add(splitstandin)
568 lfiles.add(splitstandin)
569 elif lfutil.standin(f) in p1:
569 elif lfutil.standin(f) in p1:
570 lfiles.add(f)
570 lfiles.add(f)
571
571
572 for lfile in sorted(lfiles):
572 for lfile in sorted(lfiles):
573 standin = lfutil.standin(lfile)
573 standin = lfutil.standin(lfile)
574 (lm, largs, lmsg) = mresult.actions.get(lfile, (None, None, None))
574 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
575 (sm, sargs, smsg) = mresult.actions.get(standin, (None, None, None))
575 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
576 if sm in (b'g', b'dc') and lm != b'r':
576 if sm in (b'g', b'dc') and lm != b'r':
577 if sm == b'dc':
577 if sm == b'dc':
578 f1, f2, fa, move, anc = sargs
578 f1, f2, fa, move, anc = sargs
579 sargs = (p2[f2].flags(), False)
579 sargs = (p2[f2].flags(), False)
580 # Case 1: normal file in the working copy, largefile in
580 # Case 1: normal file in the working copy, largefile in
581 # the second parent
581 # the second parent
582 usermsg = (
582 usermsg = (
583 _(
583 _(
584 b'remote turned local normal file %s into a largefile\n'
584 b'remote turned local normal file %s into a largefile\n'
585 b'use (l)argefile or keep (n)ormal file?'
585 b'use (l)argefile or keep (n)ormal file?'
586 b'$$ &Largefile $$ &Normal file'
586 b'$$ &Largefile $$ &Normal file'
587 )
587 )
588 % lfile
588 % lfile
589 )
589 )
590 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
590 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
591 mresult.addfile(lfile, b'r', None, b'replaced by standin')
591 mresult.addfile(lfile, b'r', None, b'replaced by standin')
592 mresult.addfile(standin, b'g', sargs, b'replaces standin')
592 mresult.addfile(standin, b'g', sargs, b'replaces standin')
593 else: # keep local normal file
593 else: # keep local normal file
594 mresult.addfile(lfile, b'k', None, b'replaces standin')
594 mresult.addfile(lfile, b'k', None, b'replaces standin')
595 if branchmerge:
595 if branchmerge:
596 mresult.addfile(
596 mresult.addfile(
597 standin, b'k', None, b'replaced by non-standin',
597 standin, b'k', None, b'replaced by non-standin',
598 )
598 )
599 else:
599 else:
600 mresult.addfile(
600 mresult.addfile(
601 standin, b'r', None, b'replaced by non-standin',
601 standin, b'r', None, b'replaced by non-standin',
602 )
602 )
603 elif lm in (b'g', b'dc') and sm != b'r':
603 elif lm in (b'g', b'dc') and sm != b'r':
604 if lm == b'dc':
604 if lm == b'dc':
605 f1, f2, fa, move, anc = largs
605 f1, f2, fa, move, anc = largs
606 largs = (p2[f2].flags(), False)
606 largs = (p2[f2].flags(), False)
607 # Case 2: largefile in the working copy, normal file in
607 # Case 2: largefile in the working copy, normal file in
608 # the second parent
608 # the second parent
609 usermsg = (
609 usermsg = (
610 _(
610 _(
611 b'remote turned local largefile %s into a normal file\n'
611 b'remote turned local largefile %s into a normal file\n'
612 b'keep (l)argefile or use (n)ormal file?'
612 b'keep (l)argefile or use (n)ormal file?'
613 b'$$ &Largefile $$ &Normal file'
613 b'$$ &Largefile $$ &Normal file'
614 )
614 )
615 % lfile
615 % lfile
616 )
616 )
617 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
617 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
618 if branchmerge:
618 if branchmerge:
619 # largefile can be restored from standin safely
619 # largefile can be restored from standin safely
620 mresult.addfile(
620 mresult.addfile(
621 lfile, b'k', None, b'replaced by standin',
621 lfile, b'k', None, b'replaced by standin',
622 )
622 )
623 mresult.addfile(standin, b'k', None, b'replaces standin')
623 mresult.addfile(standin, b'k', None, b'replaces standin')
624 else:
624 else:
625 # "lfile" should be marked as "removed" without
625 # "lfile" should be marked as "removed" without
626 # removal of itself
626 # removal of itself
627 mresult.addfile(
627 mresult.addfile(
628 lfile,
628 lfile,
629 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
629 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
630 None,
630 None,
631 b'forget non-standin largefile',
631 b'forget non-standin largefile',
632 )
632 )
633
633
634 # linear-merge should treat this largefile as 're-added'
634 # linear-merge should treat this largefile as 're-added'
635 mresult.addfile(standin, b'a', None, b'keep standin')
635 mresult.addfile(standin, b'a', None, b'keep standin')
636 else: # pick remote normal file
636 else: # pick remote normal file
637 mresult.addfile(lfile, b'g', largs, b'replaces standin')
637 mresult.addfile(lfile, b'g', largs, b'replaces standin')
638 mresult.addfile(
638 mresult.addfile(
639 standin, b'r', None, b'replaced by non-standin',
639 standin, b'r', None, b'replaced by non-standin',
640 )
640 )
641
641
642 return mresult
642 return mresult
643
643
644
644
645 @eh.wrapfunction(mergestatemod, b'recordupdates')
645 @eh.wrapfunction(mergestatemod, b'recordupdates')
646 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
646 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
647 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
647 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
648 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
648 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
649 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
649 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
650 # this should be executed before 'orig', to execute 'remove'
650 # this should be executed before 'orig', to execute 'remove'
651 # before all other actions
651 # before all other actions
652 repo.dirstate.remove(lfile)
652 repo.dirstate.remove(lfile)
653 # make sure lfile doesn't get synclfdirstate'd as normal
653 # make sure lfile doesn't get synclfdirstate'd as normal
654 lfdirstate.add(lfile)
654 lfdirstate.add(lfile)
655 lfdirstate.write()
655 lfdirstate.write()
656
656
657 return orig(repo, actions, branchmerge, getfiledata)
657 return orig(repo, actions, branchmerge, getfiledata)
658
658
659
659
660 # Override filemerge to prompt the user about how they wish to merge
660 # Override filemerge to prompt the user about how they wish to merge
661 # largefiles. This will handle identical edits without prompting the user.
661 # largefiles. This will handle identical edits without prompting the user.
662 @eh.wrapfunction(filemerge, b'_filemerge')
662 @eh.wrapfunction(filemerge, b'_filemerge')
663 def overridefilemerge(
663 def overridefilemerge(
664 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
664 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
665 ):
665 ):
666 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
666 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
667 return origfn(
667 return origfn(
668 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
668 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
669 )
669 )
670
670
671 ahash = lfutil.readasstandin(fca).lower()
671 ahash = lfutil.readasstandin(fca).lower()
672 dhash = lfutil.readasstandin(fcd).lower()
672 dhash = lfutil.readasstandin(fcd).lower()
673 ohash = lfutil.readasstandin(fco).lower()
673 ohash = lfutil.readasstandin(fco).lower()
674 if (
674 if (
675 ohash != ahash
675 ohash != ahash
676 and ohash != dhash
676 and ohash != dhash
677 and (
677 and (
678 dhash == ahash
678 dhash == ahash
679 or repo.ui.promptchoice(
679 or repo.ui.promptchoice(
680 _(
680 _(
681 b'largefile %s has a merge conflict\nancestor was %s\n'
681 b'largefile %s has a merge conflict\nancestor was %s\n'
682 b'you can keep (l)ocal %s or take (o)ther %s.\n'
682 b'you can keep (l)ocal %s or take (o)ther %s.\n'
683 b'what do you want to do?'
683 b'what do you want to do?'
684 b'$$ &Local $$ &Other'
684 b'$$ &Local $$ &Other'
685 )
685 )
686 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
686 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
687 0,
687 0,
688 )
688 )
689 == 1
689 == 1
690 )
690 )
691 ):
691 ):
692 repo.wwrite(fcd.path(), fco.data(), fco.flags())
692 repo.wwrite(fcd.path(), fco.data(), fco.flags())
693 return True, 0, False
693 return True, 0, False
694
694
695
695
696 @eh.wrapfunction(copiesmod, b'pathcopies')
696 @eh.wrapfunction(copiesmod, b'pathcopies')
697 def copiespathcopies(orig, ctx1, ctx2, match=None):
697 def copiespathcopies(orig, ctx1, ctx2, match=None):
698 copies = orig(ctx1, ctx2, match=match)
698 copies = orig(ctx1, ctx2, match=match)
699 updated = {}
699 updated = {}
700
700
701 for k, v in pycompat.iteritems(copies):
701 for k, v in pycompat.iteritems(copies):
702 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
702 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
703
703
704 return updated
704 return updated
705
705
706
706
707 # Copy first changes the matchers to match standins instead of
707 # Copy first changes the matchers to match standins instead of
708 # largefiles. Then it overrides util.copyfile in that function it
708 # largefiles. Then it overrides util.copyfile in that function it
709 # checks if the destination largefile already exists. It also keeps a
709 # checks if the destination largefile already exists. It also keeps a
710 # list of copied files so that the largefiles can be copied and the
710 # list of copied files so that the largefiles can be copied and the
711 # dirstate updated.
711 # dirstate updated.
712 @eh.wrapfunction(cmdutil, b'copy')
712 @eh.wrapfunction(cmdutil, b'copy')
713 def overridecopy(orig, ui, repo, pats, opts, rename=False):
713 def overridecopy(orig, ui, repo, pats, opts, rename=False):
714 # doesn't remove largefile on rename
714 # doesn't remove largefile on rename
715 if len(pats) < 2:
715 if len(pats) < 2:
716 # this isn't legal, let the original function deal with it
716 # this isn't legal, let the original function deal with it
717 return orig(ui, repo, pats, opts, rename)
717 return orig(ui, repo, pats, opts, rename)
718
718
719 # This could copy both lfiles and normal files in one command,
719 # This could copy both lfiles and normal files in one command,
720 # but we don't want to do that. First replace their matcher to
720 # but we don't want to do that. First replace their matcher to
721 # only match normal files and run it, then replace it to just
721 # only match normal files and run it, then replace it to just
722 # match largefiles and run it again.
722 # match largefiles and run it again.
723 nonormalfiles = False
723 nonormalfiles = False
724 nolfiles = False
724 nolfiles = False
725 manifest = repo[None].manifest()
725 manifest = repo[None].manifest()
726
726
727 def normalfilesmatchfn(
727 def normalfilesmatchfn(
728 orig,
728 orig,
729 ctx,
729 ctx,
730 pats=(),
730 pats=(),
731 opts=None,
731 opts=None,
732 globbed=False,
732 globbed=False,
733 default=b'relpath',
733 default=b'relpath',
734 badfn=None,
734 badfn=None,
735 ):
735 ):
736 if opts is None:
736 if opts is None:
737 opts = {}
737 opts = {}
738 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
738 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
739 return composenormalfilematcher(match, manifest)
739 return composenormalfilematcher(match, manifest)
740
740
741 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
741 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
742 try:
742 try:
743 result = orig(ui, repo, pats, opts, rename)
743 result = orig(ui, repo, pats, opts, rename)
744 except error.Abort as e:
744 except error.Abort as e:
745 if pycompat.bytestr(e) != _(b'no files to copy'):
745 if pycompat.bytestr(e) != _(b'no files to copy'):
746 raise e
746 raise e
747 else:
747 else:
748 nonormalfiles = True
748 nonormalfiles = True
749 result = 0
749 result = 0
750
750
751 # The first rename can cause our current working directory to be removed.
751 # The first rename can cause our current working directory to be removed.
752 # In that case there is nothing left to copy/rename so just quit.
752 # In that case there is nothing left to copy/rename so just quit.
753 try:
753 try:
754 repo.getcwd()
754 repo.getcwd()
755 except OSError:
755 except OSError:
756 return result
756 return result
757
757
758 def makestandin(relpath):
758 def makestandin(relpath):
759 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
759 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
760 return repo.wvfs.join(lfutil.standin(path))
760 return repo.wvfs.join(lfutil.standin(path))
761
761
762 fullpats = scmutil.expandpats(pats)
762 fullpats = scmutil.expandpats(pats)
763 dest = fullpats[-1]
763 dest = fullpats[-1]
764
764
765 if os.path.isdir(dest):
765 if os.path.isdir(dest):
766 if not os.path.isdir(makestandin(dest)):
766 if not os.path.isdir(makestandin(dest)):
767 os.makedirs(makestandin(dest))
767 os.makedirs(makestandin(dest))
768
768
769 try:
769 try:
770 # When we call orig below it creates the standins but we don't add
770 # When we call orig below it creates the standins but we don't add
771 # them to the dir state until later so lock during that time.
771 # them to the dir state until later so lock during that time.
772 wlock = repo.wlock()
772 wlock = repo.wlock()
773
773
774 manifest = repo[None].manifest()
774 manifest = repo[None].manifest()
775
775
776 def overridematch(
776 def overridematch(
777 orig,
777 orig,
778 ctx,
778 ctx,
779 pats=(),
779 pats=(),
780 opts=None,
780 opts=None,
781 globbed=False,
781 globbed=False,
782 default=b'relpath',
782 default=b'relpath',
783 badfn=None,
783 badfn=None,
784 ):
784 ):
785 if opts is None:
785 if opts is None:
786 opts = {}
786 opts = {}
787 newpats = []
787 newpats = []
788 # The patterns were previously mangled to add the standin
788 # The patterns were previously mangled to add the standin
789 # directory; we need to remove that now
789 # directory; we need to remove that now
790 for pat in pats:
790 for pat in pats:
791 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
791 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
792 newpats.append(pat.replace(lfutil.shortname, b''))
792 newpats.append(pat.replace(lfutil.shortname, b''))
793 else:
793 else:
794 newpats.append(pat)
794 newpats.append(pat)
795 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
795 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
796 m = copy.copy(match)
796 m = copy.copy(match)
797 lfile = lambda f: lfutil.standin(f) in manifest
797 lfile = lambda f: lfutil.standin(f) in manifest
798 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
798 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
799 m._fileset = set(m._files)
799 m._fileset = set(m._files)
800 origmatchfn = m.matchfn
800 origmatchfn = m.matchfn
801
801
802 def matchfn(f):
802 def matchfn(f):
803 lfile = lfutil.splitstandin(f)
803 lfile = lfutil.splitstandin(f)
804 return (
804 return (
805 lfile is not None
805 lfile is not None
806 and (f in manifest)
806 and (f in manifest)
807 and origmatchfn(lfile)
807 and origmatchfn(lfile)
808 or None
808 or None
809 )
809 )
810
810
811 m.matchfn = matchfn
811 m.matchfn = matchfn
812 return m
812 return m
813
813
814 listpats = []
814 listpats = []
815 for pat in pats:
815 for pat in pats:
816 if matchmod.patkind(pat) is not None:
816 if matchmod.patkind(pat) is not None:
817 listpats.append(pat)
817 listpats.append(pat)
818 else:
818 else:
819 listpats.append(makestandin(pat))
819 listpats.append(makestandin(pat))
820
820
821 copiedfiles = []
821 copiedfiles = []
822
822
823 def overridecopyfile(orig, src, dest, *args, **kwargs):
823 def overridecopyfile(orig, src, dest, *args, **kwargs):
824 if lfutil.shortname in src and dest.startswith(
824 if lfutil.shortname in src and dest.startswith(
825 repo.wjoin(lfutil.shortname)
825 repo.wjoin(lfutil.shortname)
826 ):
826 ):
827 destlfile = dest.replace(lfutil.shortname, b'')
827 destlfile = dest.replace(lfutil.shortname, b'')
828 if not opts[b'force'] and os.path.exists(destlfile):
828 if not opts[b'force'] and os.path.exists(destlfile):
829 raise IOError(
829 raise IOError(
830 b'', _(b'destination largefile already exists')
830 b'', _(b'destination largefile already exists')
831 )
831 )
832 copiedfiles.append((src, dest))
832 copiedfiles.append((src, dest))
833 orig(src, dest, *args, **kwargs)
833 orig(src, dest, *args, **kwargs)
834
834
835 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
835 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
836 with extensions.wrappedfunction(scmutil, b'match', overridematch):
836 with extensions.wrappedfunction(scmutil, b'match', overridematch):
837 result += orig(ui, repo, listpats, opts, rename)
837 result += orig(ui, repo, listpats, opts, rename)
838
838
839 lfdirstate = lfutil.openlfdirstate(ui, repo)
839 lfdirstate = lfutil.openlfdirstate(ui, repo)
840 for (src, dest) in copiedfiles:
840 for (src, dest) in copiedfiles:
841 if lfutil.shortname in src and dest.startswith(
841 if lfutil.shortname in src and dest.startswith(
842 repo.wjoin(lfutil.shortname)
842 repo.wjoin(lfutil.shortname)
843 ):
843 ):
844 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
844 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
845 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
845 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
846 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
846 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
847 if not os.path.isdir(destlfiledir):
847 if not os.path.isdir(destlfiledir):
848 os.makedirs(destlfiledir)
848 os.makedirs(destlfiledir)
849 if rename:
849 if rename:
850 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
850 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
851
851
852 # The file is gone, but this deletes any empty parent
852 # The file is gone, but this deletes any empty parent
853 # directories as a side-effect.
853 # directories as a side-effect.
854 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
854 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
855 lfdirstate.remove(srclfile)
855 lfdirstate.remove(srclfile)
856 else:
856 else:
857 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
857 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
858
858
859 lfdirstate.add(destlfile)
859 lfdirstate.add(destlfile)
860 lfdirstate.write()
860 lfdirstate.write()
861 except error.Abort as e:
861 except error.Abort as e:
862 if pycompat.bytestr(e) != _(b'no files to copy'):
862 if pycompat.bytestr(e) != _(b'no files to copy'):
863 raise e
863 raise e
864 else:
864 else:
865 nolfiles = True
865 nolfiles = True
866 finally:
866 finally:
867 wlock.release()
867 wlock.release()
868
868
869 if nolfiles and nonormalfiles:
869 if nolfiles and nonormalfiles:
870 raise error.Abort(_(b'no files to copy'))
870 raise error.Abort(_(b'no files to copy'))
871
871
872 return result
872 return result
873
873
874
874
875 # When the user calls revert, we have to be careful to not revert any
875 # When the user calls revert, we have to be careful to not revert any
876 # changes to other largefiles accidentally. This means we have to keep
876 # changes to other largefiles accidentally. This means we have to keep
877 # track of the largefiles that are being reverted so we only pull down
877 # track of the largefiles that are being reverted so we only pull down
878 # the necessary largefiles.
878 # the necessary largefiles.
879 #
879 #
880 # Standins are only updated (to match the hash of largefiles) before
880 # Standins are only updated (to match the hash of largefiles) before
881 # commits. Update the standins then run the original revert, changing
881 # commits. Update the standins then run the original revert, changing
882 # the matcher to hit standins instead of largefiles. Based on the
882 # the matcher to hit standins instead of largefiles. Based on the
883 # resulting standins update the largefiles.
883 # resulting standins update the largefiles.
884 @eh.wrapfunction(cmdutil, b'revert')
884 @eh.wrapfunction(cmdutil, b'revert')
885 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
885 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
886 # Because we put the standins in a bad state (by updating them)
886 # Because we put the standins in a bad state (by updating them)
887 # and then return them to a correct state we need to lock to
887 # and then return them to a correct state we need to lock to
888 # prevent others from changing them in their incorrect state.
888 # prevent others from changing them in their incorrect state.
889 with repo.wlock():
889 with repo.wlock():
890 lfdirstate = lfutil.openlfdirstate(ui, repo)
890 lfdirstate = lfutil.openlfdirstate(ui, repo)
891 s = lfutil.lfdirstatestatus(lfdirstate, repo)
891 s = lfutil.lfdirstatestatus(lfdirstate, repo)
892 lfdirstate.write()
892 lfdirstate.write()
893 for lfile in s.modified:
893 for lfile in s.modified:
894 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
894 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
895 for lfile in s.deleted:
895 for lfile in s.deleted:
896 fstandin = lfutil.standin(lfile)
896 fstandin = lfutil.standin(lfile)
897 if repo.wvfs.exists(fstandin):
897 if repo.wvfs.exists(fstandin):
898 repo.wvfs.unlink(fstandin)
898 repo.wvfs.unlink(fstandin)
899
899
900 oldstandins = lfutil.getstandinsstate(repo)
900 oldstandins = lfutil.getstandinsstate(repo)
901
901
902 def overridematch(
902 def overridematch(
903 orig,
903 orig,
904 mctx,
904 mctx,
905 pats=(),
905 pats=(),
906 opts=None,
906 opts=None,
907 globbed=False,
907 globbed=False,
908 default=b'relpath',
908 default=b'relpath',
909 badfn=None,
909 badfn=None,
910 ):
910 ):
911 if opts is None:
911 if opts is None:
912 opts = {}
912 opts = {}
913 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
913 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
914 m = copy.copy(match)
914 m = copy.copy(match)
915
915
916 # revert supports recursing into subrepos, and though largefiles
916 # revert supports recursing into subrepos, and though largefiles
917 # currently doesn't work correctly in that case, this match is
917 # currently doesn't work correctly in that case, this match is
918 # called, so the lfdirstate above may not be the correct one for
918 # called, so the lfdirstate above may not be the correct one for
919 # this invocation of match.
919 # this invocation of match.
920 lfdirstate = lfutil.openlfdirstate(
920 lfdirstate = lfutil.openlfdirstate(
921 mctx.repo().ui, mctx.repo(), False
921 mctx.repo().ui, mctx.repo(), False
922 )
922 )
923
923
924 wctx = repo[None]
924 wctx = repo[None]
925 matchfiles = []
925 matchfiles = []
926 for f in m._files:
926 for f in m._files:
927 standin = lfutil.standin(f)
927 standin = lfutil.standin(f)
928 if standin in ctx or standin in mctx:
928 if standin in ctx or standin in mctx:
929 matchfiles.append(standin)
929 matchfiles.append(standin)
930 elif standin in wctx or lfdirstate[f] == b'r':
930 elif standin in wctx or lfdirstate[f] == b'r':
931 continue
931 continue
932 else:
932 else:
933 matchfiles.append(f)
933 matchfiles.append(f)
934 m._files = matchfiles
934 m._files = matchfiles
935 m._fileset = set(m._files)
935 m._fileset = set(m._files)
936 origmatchfn = m.matchfn
936 origmatchfn = m.matchfn
937
937
938 def matchfn(f):
938 def matchfn(f):
939 lfile = lfutil.splitstandin(f)
939 lfile = lfutil.splitstandin(f)
940 if lfile is not None:
940 if lfile is not None:
941 return origmatchfn(lfile) and (f in ctx or f in mctx)
941 return origmatchfn(lfile) and (f in ctx or f in mctx)
942 return origmatchfn(f)
942 return origmatchfn(f)
943
943
944 m.matchfn = matchfn
944 m.matchfn = matchfn
945 return m
945 return m
946
946
947 with extensions.wrappedfunction(scmutil, b'match', overridematch):
947 with extensions.wrappedfunction(scmutil, b'match', overridematch):
948 orig(ui, repo, ctx, parents, *pats, **opts)
948 orig(ui, repo, ctx, parents, *pats, **opts)
949
949
950 newstandins = lfutil.getstandinsstate(repo)
950 newstandins = lfutil.getstandinsstate(repo)
951 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
951 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
952 # lfdirstate should be 'normallookup'-ed for updated files,
952 # lfdirstate should be 'normallookup'-ed for updated files,
953 # because reverting doesn't touch dirstate for 'normal' files
953 # because reverting doesn't touch dirstate for 'normal' files
954 # when target revision is explicitly specified: in such case,
954 # when target revision is explicitly specified: in such case,
955 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
955 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
956 # of target (standin) file.
956 # of target (standin) file.
957 lfcommands.updatelfiles(
957 lfcommands.updatelfiles(
958 ui, repo, filelist, printmessage=False, normallookup=True
958 ui, repo, filelist, printmessage=False, normallookup=True
959 )
959 )
960
960
961
961
962 # after pulling changesets, we need to take some extra care to get
962 # after pulling changesets, we need to take some extra care to get
963 # largefiles updated remotely
963 # largefiles updated remotely
964 @eh.wrapcommand(
964 @eh.wrapcommand(
965 b'pull',
965 b'pull',
966 opts=[
966 opts=[
967 (
967 (
968 b'',
968 b'',
969 b'all-largefiles',
969 b'all-largefiles',
970 None,
970 None,
971 _(b'download all pulled versions of largefiles (DEPRECATED)'),
971 _(b'download all pulled versions of largefiles (DEPRECATED)'),
972 ),
972 ),
973 (
973 (
974 b'',
974 b'',
975 b'lfrev',
975 b'lfrev',
976 [],
976 [],
977 _(b'download largefiles for these revisions'),
977 _(b'download largefiles for these revisions'),
978 _(b'REV'),
978 _(b'REV'),
979 ),
979 ),
980 ],
980 ],
981 )
981 )
982 def overridepull(orig, ui, repo, source=None, **opts):
982 def overridepull(orig, ui, repo, source=None, **opts):
983 revsprepull = len(repo)
983 revsprepull = len(repo)
984 if not source:
984 if not source:
985 source = b'default'
985 source = b'default'
986 repo.lfpullsource = source
986 repo.lfpullsource = source
987 result = orig(ui, repo, source, **opts)
987 result = orig(ui, repo, source, **opts)
988 revspostpull = len(repo)
988 revspostpull = len(repo)
989 lfrevs = opts.get('lfrev', [])
989 lfrevs = opts.get('lfrev', [])
990 if opts.get('all_largefiles'):
990 if opts.get('all_largefiles'):
991 lfrevs.append(b'pulled()')
991 lfrevs.append(b'pulled()')
992 if lfrevs and revspostpull > revsprepull:
992 if lfrevs and revspostpull > revsprepull:
993 numcached = 0
993 numcached = 0
994 repo.firstpulled = revsprepull # for pulled() revset expression
994 repo.firstpulled = revsprepull # for pulled() revset expression
995 try:
995 try:
996 for rev in scmutil.revrange(repo, lfrevs):
996 for rev in scmutil.revrange(repo, lfrevs):
997 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
997 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
998 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
998 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
999 numcached += len(cached)
999 numcached += len(cached)
1000 finally:
1000 finally:
1001 del repo.firstpulled
1001 del repo.firstpulled
1002 ui.status(_(b"%d largefiles cached\n") % numcached)
1002 ui.status(_(b"%d largefiles cached\n") % numcached)
1003 return result
1003 return result
1004
1004
1005
1005
1006 @eh.wrapcommand(
1006 @eh.wrapcommand(
1007 b'push',
1007 b'push',
1008 opts=[
1008 opts=[
1009 (
1009 (
1010 b'',
1010 b'',
1011 b'lfrev',
1011 b'lfrev',
1012 [],
1012 [],
1013 _(b'upload largefiles for these revisions'),
1013 _(b'upload largefiles for these revisions'),
1014 _(b'REV'),
1014 _(b'REV'),
1015 )
1015 )
1016 ],
1016 ],
1017 )
1017 )
1018 def overridepush(orig, ui, repo, *args, **kwargs):
1018 def overridepush(orig, ui, repo, *args, **kwargs):
1019 """Override push command and store --lfrev parameters in opargs"""
1019 """Override push command and store --lfrev parameters in opargs"""
1020 lfrevs = kwargs.pop('lfrev', None)
1020 lfrevs = kwargs.pop('lfrev', None)
1021 if lfrevs:
1021 if lfrevs:
1022 opargs = kwargs.setdefault('opargs', {})
1022 opargs = kwargs.setdefault('opargs', {})
1023 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1023 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1024 return orig(ui, repo, *args, **kwargs)
1024 return orig(ui, repo, *args, **kwargs)
1025
1025
1026
1026
1027 @eh.wrapfunction(exchange, b'pushoperation')
1027 @eh.wrapfunction(exchange, b'pushoperation')
1028 def exchangepushoperation(orig, *args, **kwargs):
1028 def exchangepushoperation(orig, *args, **kwargs):
1029 """Override pushoperation constructor and store lfrevs parameter"""
1029 """Override pushoperation constructor and store lfrevs parameter"""
1030 lfrevs = kwargs.pop('lfrevs', None)
1030 lfrevs = kwargs.pop('lfrevs', None)
1031 pushop = orig(*args, **kwargs)
1031 pushop = orig(*args, **kwargs)
1032 pushop.lfrevs = lfrevs
1032 pushop.lfrevs = lfrevs
1033 return pushop
1033 return pushop
1034
1034
1035
1035
1036 @eh.revsetpredicate(b'pulled()')
1036 @eh.revsetpredicate(b'pulled()')
1037 def pulledrevsetsymbol(repo, subset, x):
1037 def pulledrevsetsymbol(repo, subset, x):
1038 """Changesets that just has been pulled.
1038 """Changesets that just has been pulled.
1039
1039
1040 Only available with largefiles from pull --lfrev expressions.
1040 Only available with largefiles from pull --lfrev expressions.
1041
1041
1042 .. container:: verbose
1042 .. container:: verbose
1043
1043
1044 Some examples:
1044 Some examples:
1045
1045
1046 - pull largefiles for all new changesets::
1046 - pull largefiles for all new changesets::
1047
1047
1048 hg pull -lfrev "pulled()"
1048 hg pull -lfrev "pulled()"
1049
1049
1050 - pull largefiles for all new branch heads::
1050 - pull largefiles for all new branch heads::
1051
1051
1052 hg pull -lfrev "head(pulled()) and not closed()"
1052 hg pull -lfrev "head(pulled()) and not closed()"
1053
1053
1054 """
1054 """
1055
1055
1056 try:
1056 try:
1057 firstpulled = repo.firstpulled
1057 firstpulled = repo.firstpulled
1058 except AttributeError:
1058 except AttributeError:
1059 raise error.Abort(_(b"pulled() only available in --lfrev"))
1059 raise error.Abort(_(b"pulled() only available in --lfrev"))
1060 return smartset.baseset([r for r in subset if r >= firstpulled])
1060 return smartset.baseset([r for r in subset if r >= firstpulled])
1061
1061
1062
1062
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap 'hg clone' to reject --all-largefiles for non-local destinations.

    Largefiles can only be pre-populated into a repository that lives on
    the local filesystem, so abort early before any cloning happens.
    """
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % d
        )

    return orig(ui, source, dest, **opts)
1085
1085
1086
1086
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to optionally cache all largefiles in the new clone.

    Returns the (sourcerepo, destrepo) pair from the wrapped clone, or
    ``None`` if --all-largefiles was requested and some files could not
    be downloaded.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer.   Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point.  The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result
1111
1111
1112
1112
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebase(orig, ui, repo, **opts):
    """Wrap 'hg rebase' to suppress largefiles status output and install
    an automated commit hook for the duration of the rebase."""
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Silence per-file status messages while rebasing.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        # Always restore the hooks, even if the rebase aborts.
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1126
1126
1127
1127
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Wrap 'hg archive' so largefiles are reported/handled during archiving."""
    # Use the unfiltered repo because lfstatus is stored on it (see
    # overridearchive below for the matching check).
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)
1132
1132
1133
1133
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """Wrap hgweb's archive command to enable largefiles handling."""
    with lfstatus(web.repo):
        return orig(web)
1138
1138
1139
1139
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive a revision, substituting largefile contents for standins.

    Falls through to the wrapped archival.archive() when largefiles
    status tracking is not active on the repo.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip entries that the caller's matcher rejects.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                # The standin stores the largefile hash; resolve it to an
                # on-disk copy in the store or system cache.
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # NOTE: the lambda closes over 'path', but write() below
            # consumes it before the next loop iteration rebinds it.
            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive.  That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1235
1235
1236
1236
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive a hg subrepo, substituting largefile contents for standins.

    Falls through to the wrapped archive() when the subrepo does not have
    largefiles enabled or lfstatus is inactive.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this tests the enclosing loop variable 'f' rather
        # than 'name'; at every call site name == f, so behavior matches,
        # but 'name' would be the safer spelling -- confirm upstream intent.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                # Resolve the standin's hash to a cached largefile copy.
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function.  That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1299
1299
1300
1300
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort if the working directory has changes, including largefiles."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))
1312
1312
1313
1313
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Run the wrapped post-commit status with largefiles reporting enabled."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1318
1318
1319
1319
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Wrap cmdutil.forget to also forget largefiles and their standins.

    Normal files are delegated to the wrapped implementation; matching
    largefiles are removed from the largefiles dirstate and their standins
    forgotten from the repo.  Returns the combined (bad, forgot) lists.
    """
    # Handle normal files with the original implementation first.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == b'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1373
1373
1374
1374
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        # Collect each (filename, hash) pair only once.
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        # Query remote existence in one batch, then report only misses.
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1399
1399
1400
1400
def outgoinghook(ui, repo, other, opts, missing):
    """Report largefiles that would be uploaded by an outgoing push.

    Only active when the --large option was given; in debug mode each
    file's individual hashes are also listed.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1439
1439
1440
1440
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option- the hook above
    # processes it.
    return orig(*args, **kwargs)
1448
1448
1449
1449
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --remote' hook: report largefiles pending upload.

    When 'changes' is None this only declares which checks are needed
    (incoming, outgoing); otherwise it prints the largefiles summary line.
    """
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1482
1482
1483
1483
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Wrap 'hg summary' so largefiles are included in the status report."""
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1490
1490
1491
1491
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """Wrap scmutil.addremove to handle largefiles before normal files.

    Missing largefiles are removed and new ones added via the largefiles
    code paths; the remaining (normal) files are delegated to the wrapped
    addremove with a matcher that ignores largefiles.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove.  Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how.  Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1541
1541
1542
1542
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge', extension=b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap 'hg purge' so tracked largefiles are not reported as unknown
    (and therefore not deleted by --all)."""
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Keep only files the largefiles dirstate does not know about.
        unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
        ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1581
1581
1582
1582
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Run ``hg rollback``, then bring largefile standins back in sync.

    If the rollback moved the dirstate parents, standins are rewritten to
    match the new ``.`` revision and the largefiles dirstate is rebuilt so
    it only tracks files that still exist.
    """
    with repo.wlock():
        parentsbefore = repo.dirstate.parents()
        # Standins currently tracked (state != 'r'); any of these that are
        # no longer tracked after the rollback must be removed from disk.
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
        }
        result = orig(ui, repo, **opts)
        parentsafter = repo.dirstate.parents()
        if parentsbefore == parentsafter:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if not lfutil.isstandin(f):
                continue
            orphans.discard(f)
            if repo.dirstate[f] == b'r':
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            elif f in pctx:
                fctx = pctx[f]
                repo.wwrite(f, fctx.data(), fctx.flags())
            else:
                # content of standin is not so important in 'a',
                # 'm' or 'n' (coming from the 2nd parent) cases
                lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # Rebuild the largefiles dirstate: sync every listed largefile and
        # drop entries for files that no longer exist.
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        stale = set(lfdirstate)
        for lfile in lfutil.listlfiles(repo):
            lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
            stale.discard(lfile)
        for lfile in stale:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1623
1623
1624
1624
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with largefiles' automated commit hook installed
    and its status output silenced; both are removed again afterwards."""
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Suppress largefiles status messages for the duration of the command.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1636
1636
1637
1637
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Teach ``hg cat`` about largefiles.

    Patterns naming a largefile are mapped to their standin for matching;
    when a standin is printed, the actual largefile content is emitted
    instead, fetching it into the user cache first if necessary.

    Returns 0 if at least one file was written, 1 otherwise (mirroring
    ``commands.cat``). Raises error.Abort when a largefile is neither
    cached nor downloadable.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # Also match a standin whose largefile name matches the pattern.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # Don't complain about largefile names we matched via a standin.
        # (fix: PEP 8 membership test, was ``not f in notbad``)
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # Always descend into the standin directory; otherwise try the
        # directory itself, then its largefile-name equivalent.
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # f is a standin: emit the largefile content instead.
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1708
1708
1709
1709
@eh.wrapfunction(merge, b'update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge.update() to keep standins and lfdirstate consistent.

    Before the update, standins for modified/added largefiles are
    refreshed; after it, largefiles corresponding to changed standins are
    updated in the working directory. The statement order here is
    deliberate (interrupt safety) — do not reorder.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        # Rewrite standins for largefiles whose content may have changed,
        # so the merge sees the current working-directory state.
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                # hash matches the parent's standin: actually clean
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)
        # Make sure the merge runs on disk, not in-memory. largefiles is not a
        # good candidate for in-memory merge (large files, custom dirstate,
        # matcher usage).
        kwargs['wc'] = repo[None]
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result
1792
1792
1793
1793
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After scmutil.marktouched(), refresh any largefiles whose standins
    were among the touched files."""
    result = orig(repo, files, *args, **kwargs)

    touched = []
    for name in files:
        lfile = lfutil.splitstandin(name)
        if lfile is not None:
            touched.append(lfile)

    if touched:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=touched,
            printmessage=False,
            normallookup=True,
        )

    return result
1813
1813
1814
1814
@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Carry the 'largefiles' requirement through a repository upgrade."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1822
1822
1823
1823
_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None):
    """Open 'largefile://<id>' URLs from the largefile store; anything
    else is delegated to the wrapped url.open()."""
    if not url_.startswith(_lfscheme):
        return orig(ui, url_, data=data)
    if data:
        # POST-style data makes no sense for a store lookup.
        msg = b"cannot use data on a 'largefile://' url"
        raise error.ProgrammingError(msg)
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
@@ -1,2295 +1,2305 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 modifiednodeid,
18 modifiednodeid,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .thirdparty import attr
22 from .thirdparty import attr
23 from . import (
23 from . import (
24 copies,
24 copies,
25 encoding,
25 encoding,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 match as matchmod,
28 match as matchmod,
29 mergestate as mergestatemod,
29 mergestate as mergestatemod,
30 obsutil,
30 obsutil,
31 pathutil,
31 pathutil,
32 pycompat,
32 pycompat,
33 scmutil,
33 scmutil,
34 subrepoutil,
34 subrepoutil,
35 util,
35 util,
36 worker,
36 worker,
37 )
37 )
38
38
39 _pack = struct.pack
39 _pack = struct.pack
40 _unpack = struct.unpack
40 _unpack = struct.unpack
41
41
42
42
43 def _getcheckunknownconfig(repo, section, name):
43 def _getcheckunknownconfig(repo, section, name):
44 config = repo.ui.config(section, name)
44 config = repo.ui.config(section, name)
45 valid = [b'abort', b'ignore', b'warn']
45 valid = [b'abort', b'ignore', b'warn']
46 if config not in valid:
46 if config not in valid:
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 raise error.ConfigError(
48 raise error.ConfigError(
49 _(b"%s.%s not valid ('%s' is none of %s)")
49 _(b"%s.%s not valid ('%s' is none of %s)")
50 % (section, name, config, validstr)
50 % (section, name, config, validstr)
51 )
51 )
52 return config
52 return config
53
53
54
54
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 if wctx.isinmemory():
56 if wctx.isinmemory():
57 # Nothing to do in IMM because nothing in the "working copy" can be an
57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 # unknown file.
58 # unknown file.
59 #
59 #
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 # because that function does other useful work.
61 # because that function does other useful work.
62 return False
62 return False
63
63
64 if f2 is None:
64 if f2 is None:
65 f2 = f
65 f2 = f
66 return (
66 return (
67 repo.wvfs.audit.check(f)
67 repo.wvfs.audit.check(f)
68 and repo.wvfs.isfileorlink(f)
68 and repo.wvfs.isfileorlink(f)
69 and repo.dirstate.normalize(f) not in repo.dirstate
69 and repo.dirstate.normalize(f) not in repo.dirstate
70 and mctx[f2].cmp(wctx[f])
70 and mctx[f2].cmp(wctx[f])
71 )
71 )
72
72
73
73
74 class _unknowndirschecker(object):
74 class _unknowndirschecker(object):
75 """
75 """
76 Look for any unknown files or directories that may have a path conflict
76 Look for any unknown files or directories that may have a path conflict
77 with a file. If any path prefix of the file exists as a file or link,
77 with a file. If any path prefix of the file exists as a file or link,
78 then it conflicts. If the file itself is a directory that contains any
78 then it conflicts. If the file itself is a directory that contains any
79 file that is not tracked, then it conflicts.
79 file that is not tracked, then it conflicts.
80
80
81 Returns the shortest path at which a conflict occurs, or None if there is
81 Returns the shortest path at which a conflict occurs, or None if there is
82 no conflict.
82 no conflict.
83 """
83 """
84
84
85 def __init__(self):
85 def __init__(self):
86 # A set of paths known to be good. This prevents repeated checking of
86 # A set of paths known to be good. This prevents repeated checking of
87 # dirs. It will be updated with any new dirs that are checked and found
87 # dirs. It will be updated with any new dirs that are checked and found
88 # to be safe.
88 # to be safe.
89 self._unknowndircache = set()
89 self._unknowndircache = set()
90
90
91 # A set of paths that are known to be absent. This prevents repeated
91 # A set of paths that are known to be absent. This prevents repeated
92 # checking of subdirectories that are known not to exist. It will be
92 # checking of subdirectories that are known not to exist. It will be
93 # updated with any new dirs that are checked and found to be absent.
93 # updated with any new dirs that are checked and found to be absent.
94 self._missingdircache = set()
94 self._missingdircache = set()
95
95
96 def __call__(self, repo, wctx, f):
96 def __call__(self, repo, wctx, f):
97 if wctx.isinmemory():
97 if wctx.isinmemory():
98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
99 return False
99 return False
100
100
101 # Check for path prefixes that exist as unknown files.
101 # Check for path prefixes that exist as unknown files.
102 for p in reversed(list(pathutil.finddirs(f))):
102 for p in reversed(list(pathutil.finddirs(f))):
103 if p in self._missingdircache:
103 if p in self._missingdircache:
104 return
104 return
105 if p in self._unknowndircache:
105 if p in self._unknowndircache:
106 continue
106 continue
107 if repo.wvfs.audit.check(p):
107 if repo.wvfs.audit.check(p):
108 if (
108 if (
109 repo.wvfs.isfileorlink(p)
109 repo.wvfs.isfileorlink(p)
110 and repo.dirstate.normalize(p) not in repo.dirstate
110 and repo.dirstate.normalize(p) not in repo.dirstate
111 ):
111 ):
112 return p
112 return p
113 if not repo.wvfs.lexists(p):
113 if not repo.wvfs.lexists(p):
114 self._missingdircache.add(p)
114 self._missingdircache.add(p)
115 return
115 return
116 self._unknowndircache.add(p)
116 self._unknowndircache.add(p)
117
117
118 # Check if the file conflicts with a directory containing unknown files.
118 # Check if the file conflicts with a directory containing unknown files.
119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
120 # Does the directory contain any files that are not in the dirstate?
120 # Does the directory contain any files that are not in the dirstate?
121 for p, dirs, files in repo.wvfs.walk(f):
121 for p, dirs, files in repo.wvfs.walk(f):
122 for fn in files:
122 for fn in files:
123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
124 relf = repo.dirstate.normalize(relf, isknown=True)
124 relf = repo.dirstate.normalize(relf, isknown=True)
125 if relf not in repo.dirstate:
125 if relf not in repo.dirstate:
126 return f
126 return f
127 return None
127 return None
128
128
129
129
def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Fix applied: the original ``force`` branch contained
    ``elif config == b'abort': abortconflicts.add(f)`` *after*
    ``elif mergeforce or config == b'abort':`` — the later branch could
    never be reached. The dead branch has been removed; behavior is
    unchanged.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, args, msg in mresult.getactions(
            [
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
            ]
        ):
            if _checkunknownfile(repo, wctx, mctx, f):
                fileconflicts.add(f)
            elif pathconfig and f not in wctx:
                path = checkunknowndirs(repo, wctx, f)
                if path is not None:
                    pathconflicts.add(path)
        for f, args, msg in mresult.getactions(
            [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
        ):
            if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, args, msg in list(
            mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
        ):
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if repo.dirstate._ignore(f):
                config = ignoredconfig
            else:
                config = unknownconfig

            # The behavior when force is True is described by this table:
            #  config  different  mergeforce  |    action    backup
            #    *         n          *       |      get        n
            #    *         y          y       |     merge       -
            #   abort      y          n       |     merge       -   (1)
            #   warn       y          n       |  warn + get     y
            #  ignore      y          n       |      get        y
            #
            # (1) this is probably the wrong behavior here -- we should
            #     probably abort, but some actions like rebases currently
            #     don't like an abort happening in the middle of
            #     merge.update.
            if not different:
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_GET,
                    (fl2, False),
                    b'remote created',
                )
            elif mergeforce or config == b'abort':
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_MERGE,
                    (f, f, None, False, anc),
                    b'remote differs from untracked local',
                )
            else:
                # (A former ``elif config == b'abort'`` branch here was
                # unreachable -- already matched above -- and was removed.)
                if config == b'warn':
                    warnconflicts.add(f)
                mresult.addfile(
                    f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
                )

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    # Decide whether each newly created file needs a backup: it does if it
    # (or any of its parent directories) was found to conflict above.
    for f, args, msg in list(
        mresult.getactions([mergestatemod.ACTION_CREATED])
    ):
        backup = (
            f in fileconflicts
            or f in pathconflicts
            or any(p in pathconflicts for p in pathutil.finddirs(f))
        )
        (flags,) = args
        mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256
257
257
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    # Deleted files become removals when merging, plain forgets otherwise.
    if branchmerge:
        deletedaction = mergestatemod.ACTION_REMOVE
    else:
        deletedaction = mergestatemod.ACTION_FORGET

    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = deletedaction, None, b"forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = (
                    mergestatemod.ACTION_FORGET,
                    None,
                    b"forget removed",
                )

    return actions
290 return actions
291
291
292
292
293 def _checkcollision(repo, wmf, mresult):
293 def _checkcollision(repo, wmf, mresult):
294 """
294 """
295 Check for case-folding collisions.
295 Check for case-folding collisions.
296 """
296 """
297 # If the repo is narrowed, filter out files outside the narrowspec.
297 # If the repo is narrowed, filter out files outside the narrowspec.
298 narrowmatch = repo.narrowmatch()
298 narrowmatch = repo.narrowmatch()
299 if not narrowmatch.always():
299 if not narrowmatch.always():
300 pmmf = set(wmf.walk(narrowmatch))
300 pmmf = set(wmf.walk(narrowmatch))
301 if mresult:
301 if mresult:
302 for f, actionsfortype in pycompat.iteritems(mresult.actions):
302 for f, actionsfortype in pycompat.iteritems(mresult.actions):
303 if not narrowmatch(f):
303 if not narrowmatch(f):
304 mresult.removefile(f)
304 mresult.removefile(f)
305 else:
305 else:
306 # build provisional merged manifest up
306 # build provisional merged manifest up
307 pmmf = set(wmf)
307 pmmf = set(wmf)
308
308
309 if mresult:
309 if mresult:
310 # KEEP and EXEC are no-op
310 # KEEP and EXEC are no-op
311 for f, args, msg in mresult.getactions(
311 for f, args, msg in mresult.getactions(
312 (
312 (
313 mergestatemod.ACTION_ADD,
313 mergestatemod.ACTION_ADD,
314 mergestatemod.ACTION_ADD_MODIFIED,
314 mergestatemod.ACTION_ADD_MODIFIED,
315 mergestatemod.ACTION_FORGET,
315 mergestatemod.ACTION_FORGET,
316 mergestatemod.ACTION_GET,
316 mergestatemod.ACTION_GET,
317 mergestatemod.ACTION_CHANGED_DELETED,
317 mergestatemod.ACTION_CHANGED_DELETED,
318 mergestatemod.ACTION_DELETED_CHANGED,
318 mergestatemod.ACTION_DELETED_CHANGED,
319 )
319 )
320 ):
320 ):
321 pmmf.add(f)
321 pmmf.add(f)
322 for f, args, msg in mresult.getactions([mergestatemod.ACTION_REMOVE]):
322 for f, args, msg in mresult.getactions([mergestatemod.ACTION_REMOVE]):
323 pmmf.discard(f)
323 pmmf.discard(f)
324 for f, args, msg in mresult.getactions(
324 for f, args, msg in mresult.getactions(
325 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
325 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
326 ):
326 ):
327 f2, flags = args
327 f2, flags = args
328 pmmf.discard(f2)
328 pmmf.discard(f2)
329 pmmf.add(f)
329 pmmf.add(f)
330 for f, args, msg in mresult.getactions(
330 for f, args, msg in mresult.getactions(
331 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
331 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
332 ):
332 ):
333 pmmf.add(f)
333 pmmf.add(f)
334 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
334 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
335 f1, f2, fa, move, anc = args
335 f1, f2, fa, move, anc = args
336 if move:
336 if move:
337 pmmf.discard(f1)
337 pmmf.discard(f1)
338 pmmf.add(f)
338 pmmf.add(f)
339
339
340 # check case-folding collision in provisional merged manifest
340 # check case-folding collision in provisional merged manifest
341 foldmap = {}
341 foldmap = {}
342 for f in pmmf:
342 for f in pmmf:
343 fold = util.normcase(f)
343 fold = util.normcase(f)
344 if fold in foldmap:
344 if fold in foldmap:
345 raise error.Abort(
345 raise error.Abort(
346 _(b"case-folding collision between %s and %s")
346 _(b"case-folding collision between %s and %s")
347 % (f, foldmap[fold])
347 % (f, foldmap[fold])
348 )
348 )
349 foldmap[fold] = f
349 foldmap[fold] = f
350
350
351 # check case-folding of directories
351 # check case-folding of directories
352 foldprefix = unfoldprefix = lastfull = b''
352 foldprefix = unfoldprefix = lastfull = b''
353 for fold, f in sorted(foldmap.items()):
353 for fold, f in sorted(foldmap.items()):
354 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
354 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
355 # the folded prefix matches but actual casing is different
355 # the folded prefix matches but actual casing is different
356 raise error.Abort(
356 raise error.Abort(
357 _(b"case-folding collision between %s and directory of %s")
357 _(b"case-folding collision between %s and directory of %s")
358 % (lastfull, f)
358 % (lastfull, f)
359 )
359 )
360 foldprefix = fold + b'/'
360 foldprefix = fold + b'/'
361 unfoldprefix = f + b'/'
361 unfoldprefix = f + b'/'
362 lastfull = f
362 lastfull = f
363
363
364
364
365 def driverpreprocess(repo, ms, wctx, labels=None):
365 def driverpreprocess(repo, ms, wctx, labels=None):
366 """run the preprocess step of the merge driver, if any
366 """run the preprocess step of the merge driver, if any
367
367
368 This is currently not implemented -- it's an extension point."""
368 This is currently not implemented -- it's an extension point."""
369 return True
369 return True
370
370
371
371
372 def driverconclude(repo, ms, wctx, labels=None):
372 def driverconclude(repo, ms, wctx, labels=None):
373 """run the conclude step of the merge driver, if any
373 """run the conclude step of the merge driver, if any
374
374
375 This is currently not implemented -- it's an extension point."""
375 This is currently not implemented -- it's an extension point."""
376 return True
376 return True
377
377
378
378
379 def _filesindirs(repo, manifest, dirs):
379 def _filesindirs(repo, manifest, dirs):
380 """
380 """
381 Generator that yields pairs of all the files in the manifest that are found
381 Generator that yields pairs of all the files in the manifest that are found
382 inside the directories listed in dirs, and which directory they are found
382 inside the directories listed in dirs, and which directory they are found
383 in.
383 in.
384 """
384 """
385 for f in manifest:
385 for f in manifest:
386 for p in pathutil.finddirs(f):
386 for p in pathutil.finddirs(f):
387 if p in dirs:
387 if p in dirs:
388 yield f, p
388 yield f, p
389 break
389 break
390
390
391
391
392 def checkpathconflicts(repo, wctx, mctx, mresult):
392 def checkpathconflicts(repo, wctx, mctx, mresult):
393 """
393 """
394 Check if any actions introduce path conflicts in the repository, updating
394 Check if any actions introduce path conflicts in the repository, updating
395 actions to record or handle the path conflict accordingly.
395 actions to record or handle the path conflict accordingly.
396 """
396 """
397 mf = wctx.manifest()
397 mf = wctx.manifest()
398
398
399 # The set of local files that conflict with a remote directory.
399 # The set of local files that conflict with a remote directory.
400 localconflicts = set()
400 localconflicts = set()
401
401
402 # The set of directories that conflict with a remote file, and so may cause
402 # The set of directories that conflict with a remote file, and so may cause
403 # conflicts if they still contain any files after the merge.
403 # conflicts if they still contain any files after the merge.
404 remoteconflicts = set()
404 remoteconflicts = set()
405
405
406 # The set of directories that appear as both a file and a directory in the
406 # The set of directories that appear as both a file and a directory in the
407 # remote manifest. These indicate an invalid remote manifest, which
407 # remote manifest. These indicate an invalid remote manifest, which
408 # can't be updated to cleanly.
408 # can't be updated to cleanly.
409 invalidconflicts = set()
409 invalidconflicts = set()
410
410
411 # The set of directories that contain files that are being created.
411 # The set of directories that contain files that are being created.
412 createdfiledirs = set()
412 createdfiledirs = set()
413
413
414 # The set of files deleted by all the actions.
414 # The set of files deleted by all the actions.
415 deletedfiles = set()
415 deletedfiles = set()
416
416
417 for (f, args, msg) in mresult.getactions(
417 for (f, args, msg) in mresult.getactions(
418 (
418 (
419 mergestatemod.ACTION_CREATED,
419 mergestatemod.ACTION_CREATED,
420 mergestatemod.ACTION_DELETED_CHANGED,
420 mergestatemod.ACTION_DELETED_CHANGED,
421 mergestatemod.ACTION_MERGE,
421 mergestatemod.ACTION_MERGE,
422 mergestatemod.ACTION_CREATED_MERGE,
422 mergestatemod.ACTION_CREATED_MERGE,
423 )
423 )
424 ):
424 ):
425 # This action may create a new local file.
425 # This action may create a new local file.
426 createdfiledirs.update(pathutil.finddirs(f))
426 createdfiledirs.update(pathutil.finddirs(f))
427 if mf.hasdir(f):
427 if mf.hasdir(f):
428 # The file aliases a local directory. This might be ok if all
428 # The file aliases a local directory. This might be ok if all
429 # the files in the local directory are being deleted. This
429 # the files in the local directory are being deleted. This
430 # will be checked once we know what all the deleted files are.
430 # will be checked once we know what all the deleted files are.
431 remoteconflicts.add(f)
431 remoteconflicts.add(f)
432 # Track the names of all deleted files.
432 # Track the names of all deleted files.
433 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_REMOVE,)):
433 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_REMOVE,)):
434 deletedfiles.add(f)
434 deletedfiles.add(f)
435 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
435 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
436 f1, f2, fa, move, anc = args
436 f1, f2, fa, move, anc = args
437 if move:
437 if move:
438 deletedfiles.add(f1)
438 deletedfiles.add(f1)
439 for (f, args, msg) in mresult.getactions(
439 for (f, args, msg) in mresult.getactions(
440 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
440 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
441 ):
441 ):
442 f2, flags = args
442 f2, flags = args
443 deletedfiles.add(f2)
443 deletedfiles.add(f2)
444
444
445 # Check all directories that contain created files for path conflicts.
445 # Check all directories that contain created files for path conflicts.
446 for p in createdfiledirs:
446 for p in createdfiledirs:
447 if p in mf:
447 if p in mf:
448 if p in mctx:
448 if p in mctx:
449 # A file is in a directory which aliases both a local
449 # A file is in a directory which aliases both a local
450 # and a remote file. This is an internal inconsistency
450 # and a remote file. This is an internal inconsistency
451 # within the remote manifest.
451 # within the remote manifest.
452 invalidconflicts.add(p)
452 invalidconflicts.add(p)
453 else:
453 else:
454 # A file is in a directory which aliases a local file.
454 # A file is in a directory which aliases a local file.
455 # We will need to rename the local file.
455 # We will need to rename the local file.
456 localconflicts.add(p)
456 localconflicts.add(p)
457 if p in mresult.actions and mresult.actions[p][0] in (
457 pd = mresult.getfile(p)
458 if pd and pd[0] in (
458 mergestatemod.ACTION_CREATED,
459 mergestatemod.ACTION_CREATED,
459 mergestatemod.ACTION_DELETED_CHANGED,
460 mergestatemod.ACTION_DELETED_CHANGED,
460 mergestatemod.ACTION_MERGE,
461 mergestatemod.ACTION_MERGE,
461 mergestatemod.ACTION_CREATED_MERGE,
462 mergestatemod.ACTION_CREATED_MERGE,
462 ):
463 ):
463 # The file is in a directory which aliases a remote file.
464 # The file is in a directory which aliases a remote file.
464 # This is an internal inconsistency within the remote
465 # This is an internal inconsistency within the remote
465 # manifest.
466 # manifest.
466 invalidconflicts.add(p)
467 invalidconflicts.add(p)
467
468
468 # Rename all local conflicting files that have not been deleted.
469 # Rename all local conflicting files that have not been deleted.
469 for p in localconflicts:
470 for p in localconflicts:
470 if p not in deletedfiles:
471 if p not in deletedfiles:
471 ctxname = bytes(wctx).rstrip(b'+')
472 ctxname = bytes(wctx).rstrip(b'+')
472 pnew = util.safename(p, ctxname, wctx, set(mresult.actions.keys()))
473 pnew = util.safename(p, ctxname, wctx, set(mresult.actions.keys()))
473 porig = wctx[p].copysource() or p
474 porig = wctx[p].copysource() or p
474 mresult.addfile(
475 mresult.addfile(
475 pnew,
476 pnew,
476 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
477 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
477 (p, porig),
478 (p, porig),
478 b'local path conflict',
479 b'local path conflict',
479 )
480 )
480 mresult.addfile(
481 mresult.addfile(
481 p,
482 p,
482 mergestatemod.ACTION_PATH_CONFLICT,
483 mergestatemod.ACTION_PATH_CONFLICT,
483 (pnew, b'l'),
484 (pnew, b'l'),
484 b'path conflict',
485 b'path conflict',
485 )
486 )
486
487
487 if remoteconflicts:
488 if remoteconflicts:
488 # Check if all files in the conflicting directories have been removed.
489 # Check if all files in the conflicting directories have been removed.
489 ctxname = bytes(mctx).rstrip(b'+')
490 ctxname = bytes(mctx).rstrip(b'+')
490 for f, p in _filesindirs(repo, mf, remoteconflicts):
491 for f, p in _filesindirs(repo, mf, remoteconflicts):
491 if f not in deletedfiles:
492 if f not in deletedfiles:
492 m, args, msg = mresult.actions[p]
493 m, args, msg = mresult.getfile(p)
493 pnew = util.safename(
494 pnew = util.safename(
494 p, ctxname, wctx, set(mresult.actions.keys())
495 p, ctxname, wctx, set(mresult.actions.keys())
495 )
496 )
496 if m in (
497 if m in (
497 mergestatemod.ACTION_DELETED_CHANGED,
498 mergestatemod.ACTION_DELETED_CHANGED,
498 mergestatemod.ACTION_MERGE,
499 mergestatemod.ACTION_MERGE,
499 ):
500 ):
500 # Action was merge, just update target.
501 # Action was merge, just update target.
501 mresult.addfile(pnew, m, args, msg)
502 mresult.addfile(pnew, m, args, msg)
502 else:
503 else:
503 # Action was create, change to renamed get action.
504 # Action was create, change to renamed get action.
504 fl = args[0]
505 fl = args[0]
505 mresult.addfile(
506 mresult.addfile(
506 pnew,
507 pnew,
507 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
508 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
508 (p, fl),
509 (p, fl),
509 b'remote path conflict',
510 b'remote path conflict',
510 )
511 )
511 mresult.addfile(
512 mresult.addfile(
512 p,
513 p,
513 mergestatemod.ACTION_PATH_CONFLICT,
514 mergestatemod.ACTION_PATH_CONFLICT,
514 (pnew, mergestatemod.ACTION_REMOVE),
515 (pnew, mergestatemod.ACTION_REMOVE),
515 b'path conflict',
516 b'path conflict',
516 )
517 )
517 remoteconflicts.remove(p)
518 remoteconflicts.remove(p)
518 break
519 break
519
520
520 if invalidconflicts:
521 if invalidconflicts:
521 for p in invalidconflicts:
522 for p in invalidconflicts:
522 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
523 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
523 raise error.Abort(_(b"destination manifest contains path conflicts"))
524 raise error.Abort(_(b"destination manifest contains path conflicts"))
524
525
525
526
526 def _filternarrowactions(narrowmatch, branchmerge, mresult):
527 def _filternarrowactions(narrowmatch, branchmerge, mresult):
527 """
528 """
528 Filters out actions that can ignored because the repo is narrowed.
529 Filters out actions that can ignored because the repo is narrowed.
529
530
530 Raise an exception if the merge cannot be completed because the repo is
531 Raise an exception if the merge cannot be completed because the repo is
531 narrowed.
532 narrowed.
532 """
533 """
533 # TODO: handle with nonconflicttypes
534 # TODO: handle with nonconflicttypes
534 nooptypes = {mergestatemod.ACTION_KEEP}
535 nooptypes = {mergestatemod.ACTION_KEEP}
535 nonconflicttypes = {
536 nonconflicttypes = {
536 mergestatemod.ACTION_ADD,
537 mergestatemod.ACTION_ADD,
537 mergestatemod.ACTION_ADD_MODIFIED,
538 mergestatemod.ACTION_ADD_MODIFIED,
538 mergestatemod.ACTION_CREATED,
539 mergestatemod.ACTION_CREATED,
539 mergestatemod.ACTION_CREATED_MERGE,
540 mergestatemod.ACTION_CREATED_MERGE,
540 mergestatemod.ACTION_FORGET,
541 mergestatemod.ACTION_FORGET,
541 mergestatemod.ACTION_GET,
542 mergestatemod.ACTION_GET,
542 mergestatemod.ACTION_REMOVE,
543 mergestatemod.ACTION_REMOVE,
543 mergestatemod.ACTION_EXEC,
544 mergestatemod.ACTION_EXEC,
544 }
545 }
545 # We mutate the items in the dict during iteration, so iterate
546 # We mutate the items in the dict during iteration, so iterate
546 # over a copy.
547 # over a copy.
547 for f, action in list(mresult.actions.items()):
548 for f, action in list(mresult.actions.items()):
548 if narrowmatch(f):
549 if narrowmatch(f):
549 pass
550 pass
550 elif not branchmerge:
551 elif not branchmerge:
551 mresult.removefile(f) # just updating, ignore changes outside clone
552 mresult.removefile(f) # just updating, ignore changes outside clone
552 elif action[0] in nooptypes:
553 elif action[0] in nooptypes:
553 mresult.removefile(f) # merge does not affect file
554 mresult.removefile(f) # merge does not affect file
554 elif action[0] in nonconflicttypes:
555 elif action[0] in nonconflicttypes:
555 raise error.Abort(
556 raise error.Abort(
556 _(
557 _(
557 b'merge affects file \'%s\' outside narrow, '
558 b'merge affects file \'%s\' outside narrow, '
558 b'which is not yet supported'
559 b'which is not yet supported'
559 )
560 )
560 % f,
561 % f,
561 hint=_(b'merging in the other direction may work'),
562 hint=_(b'merging in the other direction may work'),
562 )
563 )
563 else:
564 else:
564 raise error.Abort(
565 raise error.Abort(
565 _(b'conflict in file \'%s\' is outside narrow clone') % f
566 _(b'conflict in file \'%s\' is outside narrow clone') % f
566 )
567 )
567
568
568
569
569 class mergeresult(object):
570 class mergeresult(object):
570 ''''An object representing result of merging manifests.
571 ''''An object representing result of merging manifests.
571
572
572 It has information about what actions need to be performed on dirstate
573 It has information about what actions need to be performed on dirstate
573 mapping of divergent renames and other such cases. '''
574 mapping of divergent renames and other such cases. '''
574
575
575 def __init__(self):
576 def __init__(self):
576 """
577 """
577 filemapping: dict of filename as keys and action related info as values
578 filemapping: dict of filename as keys and action related info as values
578 diverge: mapping of source name -> list of dest name for
579 diverge: mapping of source name -> list of dest name for
579 divergent renames
580 divergent renames
580 renamedelete: mapping of source name -> list of destinations for files
581 renamedelete: mapping of source name -> list of destinations for files
581 deleted on one side and renamed on other.
582 deleted on one side and renamed on other.
582 commitinfo: dict containing data which should be used on commit
583 commitinfo: dict containing data which should be used on commit
583 contains a filename -> info mapping
584 contains a filename -> info mapping
584 actionmapping: dict of action names as keys and values are dict of
585 actionmapping: dict of action names as keys and values are dict of
585 filename as key and related data as values
586 filename as key and related data as values
586 """
587 """
587 self._filemapping = {}
588 self._filemapping = {}
588 self._diverge = {}
589 self._diverge = {}
589 self._renamedelete = {}
590 self._renamedelete = {}
590 self._commitinfo = {}
591 self._commitinfo = {}
591 self._actionmapping = collections.defaultdict(dict)
592 self._actionmapping = collections.defaultdict(dict)
592
593
593 def updatevalues(self, diverge, renamedelete, commitinfo):
594 def updatevalues(self, diverge, renamedelete, commitinfo):
594 self._diverge = diverge
595 self._diverge = diverge
595 self._renamedelete = renamedelete
596 self._renamedelete = renamedelete
596 self._commitinfo = commitinfo
597 self._commitinfo = commitinfo
597
598
598 def addfile(self, filename, action, data, message):
599 def addfile(self, filename, action, data, message):
599 """ adds a new file to the mergeresult object
600 """ adds a new file to the mergeresult object
600
601
601 filename: file which we are adding
602 filename: file which we are adding
602 action: one of mergestatemod.ACTION_*
603 action: one of mergestatemod.ACTION_*
603 data: a tuple of information like fctx and ctx related to this merge
604 data: a tuple of information like fctx and ctx related to this merge
604 message: a message about the merge
605 message: a message about the merge
605 """
606 """
606 # if the file already existed, we need to delete it's old
607 # if the file already existed, we need to delete it's old
607 # entry form _actionmapping too
608 # entry form _actionmapping too
608 if filename in self._filemapping:
609 if filename in self._filemapping:
609 a, d, m = self._filemapping[filename]
610 a, d, m = self._filemapping[filename]
610 del self._actionmapping[a][filename]
611 del self._actionmapping[a][filename]
611
612
612 self._filemapping[filename] = (action, data, message)
613 self._filemapping[filename] = (action, data, message)
613 self._actionmapping[action][filename] = (data, message)
614 self._actionmapping[action][filename] = (data, message)
614
615
616 def getfile(self, filename, default_return=None):
617 """ returns (action, args, msg) about this file
618
619 returns default_return if the file is not present """
620 if filename in self._filemapping:
621 return self._filemapping[filename]
622 return default_return
623
615 def removefile(self, filename):
624 def removefile(self, filename):
616 """ removes a file from the mergeresult object as the file might
625 """ removes a file from the mergeresult object as the file might
617 not merging anymore """
626 not merging anymore """
618 action, data, message = self._filemapping[filename]
627 action, data, message = self._filemapping[filename]
619 del self._filemapping[filename]
628 del self._filemapping[filename]
620 del self._actionmapping[action][filename]
629 del self._actionmapping[action][filename]
621
630
622 def getactions(self, actions, sort=False):
631 def getactions(self, actions, sort=False):
623 """ get list of files which are marked with these actions
632 """ get list of files which are marked with these actions
624 if sort is true, files for each action is sorted and then added
633 if sort is true, files for each action is sorted and then added
625
634
626 Returns a list of tuple of form (filename, data, message)
635 Returns a list of tuple of form (filename, data, message)
627 """
636 """
628 for a in actions:
637 for a in actions:
629 if sort:
638 if sort:
630 for f in sorted(self._actionmapping[a]):
639 for f in sorted(self._actionmapping[a]):
631 args, msg = self._actionmapping[a][f]
640 args, msg = self._actionmapping[a][f]
632 yield f, args, msg
641 yield f, args, msg
633 else:
642 else:
634 for f, (args, msg) in pycompat.iteritems(
643 for f, (args, msg) in pycompat.iteritems(
635 self._actionmapping[a]
644 self._actionmapping[a]
636 ):
645 ):
637 yield f, args, msg
646 yield f, args, msg
638
647
639 def len(self, actions=None):
648 def len(self, actions=None):
640 """ returns number of files which needs actions
649 """ returns number of files which needs actions
641
650
642 if actions is passed, total of number of files in that action
651 if actions is passed, total of number of files in that action
643 only is returned """
652 only is returned """
644
653
645 if actions is None:
654 if actions is None:
646 return len(self._filemapping)
655 return len(self._filemapping)
647
656
648 return sum(len(self._actionmapping[a]) for a in actions)
657 return sum(len(self._actionmapping[a]) for a in actions)
649
658
650 @property
659 @property
651 def actions(self):
660 def actions(self):
652 return self._filemapping
661 return self._filemapping
653
662
654 @property
663 @property
655 def diverge(self):
664 def diverge(self):
656 return self._diverge
665 return self._diverge
657
666
658 @property
667 @property
659 def renamedelete(self):
668 def renamedelete(self):
660 return self._renamedelete
669 return self._renamedelete
661
670
662 @property
671 @property
663 def commitinfo(self):
672 def commitinfo(self):
664 return self._commitinfo
673 return self._commitinfo
665
674
666 @property
675 @property
667 def actionsdict(self):
676 def actionsdict(self):
668 """ returns a dictionary of actions to be perfomed with action as key
677 """ returns a dictionary of actions to be perfomed with action as key
669 and a list of files and related arguments as values """
678 and a list of files and related arguments as values """
670 res = emptyactions()
679 res = emptyactions()
671 for a, d in pycompat.iteritems(self._actionmapping):
680 for a, d in pycompat.iteritems(self._actionmapping):
672 for f, (args, msg) in pycompat.iteritems(d):
681 for f, (args, msg) in pycompat.iteritems(d):
673 res[a].append((f, args, msg))
682 res[a].append((f, args, msg))
674 return res
683 return res
675
684
676 def setactions(self, actions):
685 def setactions(self, actions):
677 self._filemapping = actions
686 self._filemapping = actions
678 self._actionmapping = collections.defaultdict(dict)
687 self._actionmapping = collections.defaultdict(dict)
679 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
688 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
680 self._actionmapping[act][f] = data, msg
689 self._actionmapping[act][f] = data, msg
681
690
682 def updateactions(self, updates):
691 def updateactions(self, updates):
683 for f, (a, data, msg) in pycompat.iteritems(updates):
692 for f, (a, data, msg) in pycompat.iteritems(updates):
684 self.addfile(f, a, data, msg)
693 self.addfile(f, a, data, msg)
685
694
686 def hasconflicts(self):
695 def hasconflicts(self):
687 """ tells whether this merge resulted in some actions which can
696 """ tells whether this merge resulted in some actions which can
688 result in conflicts or not """
697 result in conflicts or not """
689 for a in self._actionmapping.keys():
698 for a in self._actionmapping.keys():
690 if (
699 if (
691 a
700 a
692 not in (
701 not in (
693 mergestatemod.ACTION_GET,
702 mergestatemod.ACTION_GET,
694 mergestatemod.ACTION_KEEP,
703 mergestatemod.ACTION_KEEP,
695 mergestatemod.ACTION_EXEC,
704 mergestatemod.ACTION_EXEC,
696 mergestatemod.ACTION_REMOVE,
705 mergestatemod.ACTION_REMOVE,
697 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
706 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
698 )
707 )
699 and self._actionmapping[a]
708 and self._actionmapping[a]
700 ):
709 ):
701 return True
710 return True
702
711
703 return False
712 return False
704
713
705
714
706 def manifestmerge(
715 def manifestmerge(
707 repo,
716 repo,
708 wctx,
717 wctx,
709 p2,
718 p2,
710 pa,
719 pa,
711 branchmerge,
720 branchmerge,
712 force,
721 force,
713 matcher,
722 matcher,
714 acceptremote,
723 acceptremote,
715 followcopies,
724 followcopies,
716 forcefulldiff=False,
725 forcefulldiff=False,
717 ):
726 ):
718 """
727 """
719 Merge wctx and p2 with ancestor pa and generate merge action list
728 Merge wctx and p2 with ancestor pa and generate merge action list
720
729
721 branchmerge and force are as passed in to update
730 branchmerge and force are as passed in to update
722 matcher = matcher to filter file lists
731 matcher = matcher to filter file lists
723 acceptremote = accept the incoming changes without prompting
732 acceptremote = accept the incoming changes without prompting
724
733
725 Returns an object of mergeresult class
734 Returns an object of mergeresult class
726 """
735 """
727 mresult = mergeresult()
736 mresult = mergeresult()
728 if matcher is not None and matcher.always():
737 if matcher is not None and matcher.always():
729 matcher = None
738 matcher = None
730
739
731 # manifests fetched in order are going to be faster, so prime the caches
740 # manifests fetched in order are going to be faster, so prime the caches
732 [
741 [
733 x.manifest()
742 x.manifest()
734 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
743 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
735 ]
744 ]
736
745
737 branch_copies1 = copies.branch_copies()
746 branch_copies1 = copies.branch_copies()
738 branch_copies2 = copies.branch_copies()
747 branch_copies2 = copies.branch_copies()
739 diverge = {}
748 diverge = {}
740 # information from merge which is needed at commit time
749 # information from merge which is needed at commit time
741 # for example choosing filelog of which parent to commit
750 # for example choosing filelog of which parent to commit
742 # TODO: use specific constants in future for this mapping
751 # TODO: use specific constants in future for this mapping
743 commitinfo = {}
752 commitinfo = {}
744 if followcopies:
753 if followcopies:
745 branch_copies1, branch_copies2, diverge = copies.mergecopies(
754 branch_copies1, branch_copies2, diverge = copies.mergecopies(
746 repo, wctx, p2, pa
755 repo, wctx, p2, pa
747 )
756 )
748
757
749 boolbm = pycompat.bytestr(bool(branchmerge))
758 boolbm = pycompat.bytestr(bool(branchmerge))
750 boolf = pycompat.bytestr(bool(force))
759 boolf = pycompat.bytestr(bool(force))
751 boolm = pycompat.bytestr(bool(matcher))
760 boolm = pycompat.bytestr(bool(matcher))
752 repo.ui.note(_(b"resolving manifests\n"))
761 repo.ui.note(_(b"resolving manifests\n"))
753 repo.ui.debug(
762 repo.ui.debug(
754 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
763 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
755 )
764 )
756 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
765 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
757
766
758 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
767 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
759 copied1 = set(branch_copies1.copy.values())
768 copied1 = set(branch_copies1.copy.values())
760 copied1.update(branch_copies1.movewithdir.values())
769 copied1.update(branch_copies1.movewithdir.values())
761 copied2 = set(branch_copies2.copy.values())
770 copied2 = set(branch_copies2.copy.values())
762 copied2.update(branch_copies2.movewithdir.values())
771 copied2.update(branch_copies2.movewithdir.values())
763
772
764 if b'.hgsubstate' in m1 and wctx.rev() is None:
773 if b'.hgsubstate' in m1 and wctx.rev() is None:
765 # Check whether sub state is modified, and overwrite the manifest
774 # Check whether sub state is modified, and overwrite the manifest
766 # to flag the change. If wctx is a committed revision, we shouldn't
775 # to flag the change. If wctx is a committed revision, we shouldn't
767 # care for the dirty state of the working directory.
776 # care for the dirty state of the working directory.
768 if any(wctx.sub(s).dirty() for s in wctx.substate):
777 if any(wctx.sub(s).dirty() for s in wctx.substate):
769 m1[b'.hgsubstate'] = modifiednodeid
778 m1[b'.hgsubstate'] = modifiednodeid
770
779
771 # Don't use m2-vs-ma optimization if:
780 # Don't use m2-vs-ma optimization if:
772 # - ma is the same as m1 or m2, which we're just going to diff again later
781 # - ma is the same as m1 or m2, which we're just going to diff again later
773 # - The caller specifically asks for a full diff, which is useful during bid
782 # - The caller specifically asks for a full diff, which is useful during bid
774 # merge.
783 # merge.
775 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
784 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
776 # Identify which files are relevant to the merge, so we can limit the
785 # Identify which files are relevant to the merge, so we can limit the
777 # total m1-vs-m2 diff to just those files. This has significant
786 # total m1-vs-m2 diff to just those files. This has significant
778 # performance benefits in large repositories.
787 # performance benefits in large repositories.
779 relevantfiles = set(ma.diff(m2).keys())
788 relevantfiles = set(ma.diff(m2).keys())
780
789
781 # For copied and moved files, we need to add the source file too.
790 # For copied and moved files, we need to add the source file too.
782 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
791 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
783 if copyvalue in relevantfiles:
792 if copyvalue in relevantfiles:
784 relevantfiles.add(copykey)
793 relevantfiles.add(copykey)
785 for movedirkey in branch_copies1.movewithdir:
794 for movedirkey in branch_copies1.movewithdir:
786 relevantfiles.add(movedirkey)
795 relevantfiles.add(movedirkey)
787 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
796 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
788 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
797 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
789
798
790 diff = m1.diff(m2, match=matcher)
799 diff = m1.diff(m2, match=matcher)
791
800
792 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
801 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
793 if n1 and n2: # file exists on both local and remote side
802 if n1 and n2: # file exists on both local and remote side
794 if f not in ma:
803 if f not in ma:
795 # TODO: what if they're renamed from different sources?
804 # TODO: what if they're renamed from different sources?
796 fa = branch_copies1.copy.get(
805 fa = branch_copies1.copy.get(
797 f, None
806 f, None
798 ) or branch_copies2.copy.get(f, None)
807 ) or branch_copies2.copy.get(f, None)
799 args, msg = None, None
808 args, msg = None, None
800 if fa is not None:
809 if fa is not None:
801 args = (f, f, fa, False, pa.node())
810 args = (f, f, fa, False, pa.node())
802 msg = b'both renamed from %s' % fa
811 msg = b'both renamed from %s' % fa
803 else:
812 else:
804 args = (f, f, None, False, pa.node())
813 args = (f, f, None, False, pa.node())
805 msg = b'both created'
814 msg = b'both created'
806 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
815 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
807 else:
816 else:
808 a = ma[f]
817 a = ma[f]
809 fla = ma.flags(f)
818 fla = ma.flags(f)
810 nol = b'l' not in fl1 + fl2 + fla
819 nol = b'l' not in fl1 + fl2 + fla
811 if n2 == a and fl2 == fla:
820 if n2 == a and fl2 == fla:
812 mresult.addfile(
821 mresult.addfile(
813 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
822 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
814 )
823 )
815 elif n1 == a and fl1 == fla: # local unchanged - use remote
824 elif n1 == a and fl1 == fla: # local unchanged - use remote
816 if n1 == n2: # optimization: keep local content
825 if n1 == n2: # optimization: keep local content
817 mresult.addfile(
826 mresult.addfile(
818 f,
827 f,
819 mergestatemod.ACTION_EXEC,
828 mergestatemod.ACTION_EXEC,
820 (fl2,),
829 (fl2,),
821 b'update permissions',
830 b'update permissions',
822 )
831 )
823 else:
832 else:
824 mresult.addfile(
833 mresult.addfile(
825 f,
834 f,
826 mergestatemod.ACTION_GET,
835 mergestatemod.ACTION_GET,
827 (fl2, False),
836 (fl2, False),
828 b'remote is newer',
837 b'remote is newer',
829 )
838 )
830 if branchmerge:
839 if branchmerge:
831 commitinfo[f] = b'other'
840 commitinfo[f] = b'other'
832 elif nol and n2 == a: # remote only changed 'x'
841 elif nol and n2 == a: # remote only changed 'x'
833 mresult.addfile(
842 mresult.addfile(
834 f,
843 f,
835 mergestatemod.ACTION_EXEC,
844 mergestatemod.ACTION_EXEC,
836 (fl2,),
845 (fl2,),
837 b'update permissions',
846 b'update permissions',
838 )
847 )
839 elif nol and n1 == a: # local only changed 'x'
848 elif nol and n1 == a: # local only changed 'x'
840 mresult.addfile(
849 mresult.addfile(
841 f,
850 f,
842 mergestatemod.ACTION_GET,
851 mergestatemod.ACTION_GET,
843 (fl1, False),
852 (fl1, False),
844 b'remote is newer',
853 b'remote is newer',
845 )
854 )
846 if branchmerge:
855 if branchmerge:
847 commitinfo[f] = b'other'
856 commitinfo[f] = b'other'
848 else: # both changed something
857 else: # both changed something
849 mresult.addfile(
858 mresult.addfile(
850 f,
859 f,
851 mergestatemod.ACTION_MERGE,
860 mergestatemod.ACTION_MERGE,
852 (f, f, f, False, pa.node()),
861 (f, f, f, False, pa.node()),
853 b'versions differ',
862 b'versions differ',
854 )
863 )
855 elif n1: # file exists only on local side
864 elif n1: # file exists only on local side
856 if f in copied2:
865 if f in copied2:
857 pass # we'll deal with it on m2 side
866 pass # we'll deal with it on m2 side
858 elif (
867 elif (
859 f in branch_copies1.movewithdir
868 f in branch_copies1.movewithdir
860 ): # directory rename, move local
869 ): # directory rename, move local
861 f2 = branch_copies1.movewithdir[f]
870 f2 = branch_copies1.movewithdir[f]
862 if f2 in m2:
871 if f2 in m2:
863 mresult.addfile(
872 mresult.addfile(
864 f2,
873 f2,
865 mergestatemod.ACTION_MERGE,
874 mergestatemod.ACTION_MERGE,
866 (f, f2, None, True, pa.node()),
875 (f, f2, None, True, pa.node()),
867 b'remote directory rename, both created',
876 b'remote directory rename, both created',
868 )
877 )
869 else:
878 else:
870 mresult.addfile(
879 mresult.addfile(
871 f2,
880 f2,
872 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
881 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
873 (f, fl1),
882 (f, fl1),
874 b'remote directory rename - move from %s' % f,
883 b'remote directory rename - move from %s' % f,
875 )
884 )
876 elif f in branch_copies1.copy:
885 elif f in branch_copies1.copy:
877 f2 = branch_copies1.copy[f]
886 f2 = branch_copies1.copy[f]
878 mresult.addfile(
887 mresult.addfile(
879 f,
888 f,
880 mergestatemod.ACTION_MERGE,
889 mergestatemod.ACTION_MERGE,
881 (f, f2, f2, False, pa.node()),
890 (f, f2, f2, False, pa.node()),
882 b'local copied/moved from %s' % f2,
891 b'local copied/moved from %s' % f2,
883 )
892 )
884 elif f in ma: # clean, a different, no remote
893 elif f in ma: # clean, a different, no remote
885 if n1 != ma[f]:
894 if n1 != ma[f]:
886 if acceptremote:
895 if acceptremote:
887 mresult.addfile(
896 mresult.addfile(
888 f,
897 f,
889 mergestatemod.ACTION_REMOVE,
898 mergestatemod.ACTION_REMOVE,
890 None,
899 None,
891 b'remote delete',
900 b'remote delete',
892 )
901 )
893 else:
902 else:
894 mresult.addfile(
903 mresult.addfile(
895 f,
904 f,
896 mergestatemod.ACTION_CHANGED_DELETED,
905 mergestatemod.ACTION_CHANGED_DELETED,
897 (f, None, f, False, pa.node()),
906 (f, None, f, False, pa.node()),
898 b'prompt changed/deleted',
907 b'prompt changed/deleted',
899 )
908 )
900 elif n1 == addednodeid:
909 elif n1 == addednodeid:
901 # This file was locally added. We should forget it instead of
910 # This file was locally added. We should forget it instead of
902 # deleting it.
911 # deleting it.
903 mresult.addfile(
912 mresult.addfile(
904 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
913 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
905 )
914 )
906 else:
915 else:
907 mresult.addfile(
916 mresult.addfile(
908 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
917 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
909 )
918 )
910 elif n2: # file exists only on remote side
919 elif n2: # file exists only on remote side
911 if f in copied1:
920 if f in copied1:
912 pass # we'll deal with it on m1 side
921 pass # we'll deal with it on m1 side
913 elif f in branch_copies2.movewithdir:
922 elif f in branch_copies2.movewithdir:
914 f2 = branch_copies2.movewithdir[f]
923 f2 = branch_copies2.movewithdir[f]
915 if f2 in m1:
924 if f2 in m1:
916 mresult.addfile(
925 mresult.addfile(
917 f2,
926 f2,
918 mergestatemod.ACTION_MERGE,
927 mergestatemod.ACTION_MERGE,
919 (f2, f, None, False, pa.node()),
928 (f2, f, None, False, pa.node()),
920 b'local directory rename, both created',
929 b'local directory rename, both created',
921 )
930 )
922 else:
931 else:
923 mresult.addfile(
932 mresult.addfile(
924 f2,
933 f2,
925 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
934 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
926 (f, fl2),
935 (f, fl2),
927 b'local directory rename - get from %s' % f,
936 b'local directory rename - get from %s' % f,
928 )
937 )
929 elif f in branch_copies2.copy:
938 elif f in branch_copies2.copy:
930 f2 = branch_copies2.copy[f]
939 f2 = branch_copies2.copy[f]
931 msg, args = None, None
940 msg, args = None, None
932 if f2 in m2:
941 if f2 in m2:
933 args = (f2, f, f2, False, pa.node())
942 args = (f2, f, f2, False, pa.node())
934 msg = b'remote copied from %s' % f2
943 msg = b'remote copied from %s' % f2
935 else:
944 else:
936 args = (f2, f, f2, True, pa.node())
945 args = (f2, f, f2, True, pa.node())
937 msg = b'remote moved from %s' % f2
946 msg = b'remote moved from %s' % f2
938 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
947 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
939 elif f not in ma:
948 elif f not in ma:
940 # local unknown, remote created: the logic is described by the
949 # local unknown, remote created: the logic is described by the
941 # following table:
950 # following table:
942 #
951 #
943 # force branchmerge different | action
952 # force branchmerge different | action
944 # n * * | create
953 # n * * | create
945 # y n * | create
954 # y n * | create
946 # y y n | create
955 # y y n | create
947 # y y y | merge
956 # y y y | merge
948 #
957 #
949 # Checking whether the files are different is expensive, so we
958 # Checking whether the files are different is expensive, so we
950 # don't do that when we can avoid it.
959 # don't do that when we can avoid it.
951 if not force:
960 if not force:
952 mresult.addfile(
961 mresult.addfile(
953 f,
962 f,
954 mergestatemod.ACTION_CREATED,
963 mergestatemod.ACTION_CREATED,
955 (fl2,),
964 (fl2,),
956 b'remote created',
965 b'remote created',
957 )
966 )
958 elif not branchmerge:
967 elif not branchmerge:
959 mresult.addfile(
968 mresult.addfile(
960 f,
969 f,
961 mergestatemod.ACTION_CREATED,
970 mergestatemod.ACTION_CREATED,
962 (fl2,),
971 (fl2,),
963 b'remote created',
972 b'remote created',
964 )
973 )
965 else:
974 else:
966 mresult.addfile(
975 mresult.addfile(
967 f,
976 f,
968 mergestatemod.ACTION_CREATED_MERGE,
977 mergestatemod.ACTION_CREATED_MERGE,
969 (fl2, pa.node()),
978 (fl2, pa.node()),
970 b'remote created, get or merge',
979 b'remote created, get or merge',
971 )
980 )
972 elif n2 != ma[f]:
981 elif n2 != ma[f]:
973 df = None
982 df = None
974 for d in branch_copies1.dirmove:
983 for d in branch_copies1.dirmove:
975 if f.startswith(d):
984 if f.startswith(d):
976 # new file added in a directory that was moved
985 # new file added in a directory that was moved
977 df = branch_copies1.dirmove[d] + f[len(d) :]
986 df = branch_copies1.dirmove[d] + f[len(d) :]
978 break
987 break
979 if df is not None and df in m1:
988 if df is not None and df in m1:
980 mresult.addfile(
989 mresult.addfile(
981 df,
990 df,
982 mergestatemod.ACTION_MERGE,
991 mergestatemod.ACTION_MERGE,
983 (df, f, f, False, pa.node()),
992 (df, f, f, False, pa.node()),
984 b'local directory rename - respect move '
993 b'local directory rename - respect move '
985 b'from %s' % f,
994 b'from %s' % f,
986 )
995 )
987 elif acceptremote:
996 elif acceptremote:
988 mresult.addfile(
997 mresult.addfile(
989 f,
998 f,
990 mergestatemod.ACTION_CREATED,
999 mergestatemod.ACTION_CREATED,
991 (fl2,),
1000 (fl2,),
992 b'remote recreating',
1001 b'remote recreating',
993 )
1002 )
994 else:
1003 else:
995 mresult.addfile(
1004 mresult.addfile(
996 f,
1005 f,
997 mergestatemod.ACTION_DELETED_CHANGED,
1006 mergestatemod.ACTION_DELETED_CHANGED,
998 (None, f, f, False, pa.node()),
1007 (None, f, f, False, pa.node()),
999 b'prompt deleted/changed',
1008 b'prompt deleted/changed',
1000 )
1009 )
1001
1010
1002 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1011 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1003 # If we are merging, look for path conflicts.
1012 # If we are merging, look for path conflicts.
1004 checkpathconflicts(repo, wctx, p2, mresult)
1013 checkpathconflicts(repo, wctx, p2, mresult)
1005
1014
1006 narrowmatch = repo.narrowmatch()
1015 narrowmatch = repo.narrowmatch()
1007 if not narrowmatch.always():
1016 if not narrowmatch.always():
1008 # Updates "actions" in place
1017 # Updates "actions" in place
1009 _filternarrowactions(narrowmatch, branchmerge, mresult)
1018 _filternarrowactions(narrowmatch, branchmerge, mresult)
1010
1019
1011 renamedelete = branch_copies1.renamedelete
1020 renamedelete = branch_copies1.renamedelete
1012 renamedelete.update(branch_copies2.renamedelete)
1021 renamedelete.update(branch_copies2.renamedelete)
1013
1022
1014 mresult.updatevalues(diverge, renamedelete, commitinfo)
1023 mresult.updatevalues(diverge, renamedelete, commitinfo)
1015 return mresult
1024 return mresult
1016
1025
1017
1026
1018 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1027 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1019 """Resolves false conflicts where the nodeid changed but the content
1028 """Resolves false conflicts where the nodeid changed but the content
1020 remained the same."""
1029 remained the same."""
1021 # We force a copy of actions.items() because we're going to mutate
1030 # We force a copy of actions.items() because we're going to mutate
1022 # actions as we resolve trivial conflicts.
1031 # actions as we resolve trivial conflicts.
1023 for f, args, msg in list(
1032 for f, args, msg in list(
1024 mresult.getactions([mergestatemod.ACTION_CHANGED_DELETED])
1033 mresult.getactions([mergestatemod.ACTION_CHANGED_DELETED])
1025 ):
1034 ):
1026 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1035 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1027 # local did change but ended up with same content
1036 # local did change but ended up with same content
1028 mresult.addfile(
1037 mresult.addfile(
1029 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1038 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1030 )
1039 )
1031
1040
1032 for f, args, msg in list(
1041 for f, args, msg in list(
1033 mresult.getactions([mergestatemod.ACTION_DELETED_CHANGED])
1042 mresult.getactions([mergestatemod.ACTION_DELETED_CHANGED])
1034 ):
1043 ):
1035 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1044 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1036 # remote did change but ended up with same content
1045 # remote did change but ended up with same content
1037 mresult.removefile(f) # don't get = keep local deleted
1046 mresult.removefile(f) # don't get = keep local deleted
1038
1047
1039
1048
1040 def calculateupdates(
1049 def calculateupdates(
1041 repo,
1050 repo,
1042 wctx,
1051 wctx,
1043 mctx,
1052 mctx,
1044 ancestors,
1053 ancestors,
1045 branchmerge,
1054 branchmerge,
1046 force,
1055 force,
1047 acceptremote,
1056 acceptremote,
1048 followcopies,
1057 followcopies,
1049 matcher=None,
1058 matcher=None,
1050 mergeforce=False,
1059 mergeforce=False,
1051 ):
1060 ):
1052 """
1061 """
1053 Calculate the actions needed to merge mctx into wctx using ancestors
1062 Calculate the actions needed to merge mctx into wctx using ancestors
1054
1063
1055 Uses manifestmerge() to merge manifest and get list of actions required to
1064 Uses manifestmerge() to merge manifest and get list of actions required to
1056 perform for merging two manifests. If there are multiple ancestors, uses bid
1065 perform for merging two manifests. If there are multiple ancestors, uses bid
1057 merge if enabled.
1066 merge if enabled.
1058
1067
1059 Also filters out actions which are unrequired if repository is sparse.
1068 Also filters out actions which are unrequired if repository is sparse.
1060
1069
1061 Returns mergeresult object same as manifestmerge().
1070 Returns mergeresult object same as manifestmerge().
1062 """
1071 """
1063 # Avoid cycle.
1072 # Avoid cycle.
1064 from . import sparse
1073 from . import sparse
1065
1074
1066 mresult = None
1075 mresult = None
1067 if len(ancestors) == 1: # default
1076 if len(ancestors) == 1: # default
1068 mresult = manifestmerge(
1077 mresult = manifestmerge(
1069 repo,
1078 repo,
1070 wctx,
1079 wctx,
1071 mctx,
1080 mctx,
1072 ancestors[0],
1081 ancestors[0],
1073 branchmerge,
1082 branchmerge,
1074 force,
1083 force,
1075 matcher,
1084 matcher,
1076 acceptremote,
1085 acceptremote,
1077 followcopies,
1086 followcopies,
1078 )
1087 )
1079 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1088 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1080
1089
1081 else: # only when merge.preferancestor=* - the default
1090 else: # only when merge.preferancestor=* - the default
1082 repo.ui.note(
1091 repo.ui.note(
1083 _(b"note: merging %s and %s using bids from ancestors %s\n")
1092 _(b"note: merging %s and %s using bids from ancestors %s\n")
1084 % (
1093 % (
1085 wctx,
1094 wctx,
1086 mctx,
1095 mctx,
1087 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1096 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1088 )
1097 )
1089 )
1098 )
1090
1099
1091 # mapping filename to bids (action method to list af actions)
1100 # mapping filename to bids (action method to list af actions)
1092 # {FILENAME1 : BID1, FILENAME2 : BID2}
1101 # {FILENAME1 : BID1, FILENAME2 : BID2}
1093 # BID is another dictionary which contains
1102 # BID is another dictionary which contains
1094 # mapping of following form:
1103 # mapping of following form:
1095 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1104 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1096 fbids = {}
1105 fbids = {}
1097 diverge, renamedelete = None, None
1106 diverge, renamedelete = None, None
1098 for ancestor in ancestors:
1107 for ancestor in ancestors:
1099 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1108 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1100 mresult1 = manifestmerge(
1109 mresult1 = manifestmerge(
1101 repo,
1110 repo,
1102 wctx,
1111 wctx,
1103 mctx,
1112 mctx,
1104 ancestor,
1113 ancestor,
1105 branchmerge,
1114 branchmerge,
1106 force,
1115 force,
1107 matcher,
1116 matcher,
1108 acceptremote,
1117 acceptremote,
1109 followcopies,
1118 followcopies,
1110 forcefulldiff=True,
1119 forcefulldiff=True,
1111 )
1120 )
1112 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1121 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1113
1122
1114 # Track the shortest set of warning on the theory that bid
1123 # Track the shortest set of warning on the theory that bid
1115 # merge will correctly incorporate more information
1124 # merge will correctly incorporate more information
1116 if diverge is None or len(mresult1.diverge) < len(diverge):
1125 if diverge is None or len(mresult1.diverge) < len(diverge):
1117 diverge = mresult1.diverge
1126 diverge = mresult1.diverge
1118 if renamedelete is None or len(renamedelete) < len(
1127 if renamedelete is None or len(renamedelete) < len(
1119 mresult1.renamedelete
1128 mresult1.renamedelete
1120 ):
1129 ):
1121 renamedelete = mresult1.renamedelete
1130 renamedelete = mresult1.renamedelete
1122
1131
1123 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1132 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1124 m, args, msg = a
1133 m, args, msg = a
1125 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1134 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1126 if f in fbids:
1135 if f in fbids:
1127 d = fbids[f]
1136 d = fbids[f]
1128 if m in d:
1137 if m in d:
1129 d[m].append(a)
1138 d[m].append(a)
1130 else:
1139 else:
1131 d[m] = [a]
1140 d[m] = [a]
1132 else:
1141 else:
1133 fbids[f] = {m: [a]}
1142 fbids[f] = {m: [a]}
1134
1143
1135 # Call for bids
1144 # Call for bids
1136 # Pick the best bid for each file
1145 # Pick the best bid for each file
1137 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1146 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1138 mresult = mergeresult()
1147 mresult = mergeresult()
1139 for f, bids in sorted(fbids.items()):
1148 for f, bids in sorted(fbids.items()):
1140 # bids is a mapping from action method to list af actions
1149 # bids is a mapping from action method to list af actions
1141 # Consensus?
1150 # Consensus?
1142 if len(bids) == 1: # all bids are the same kind of method
1151 if len(bids) == 1: # all bids are the same kind of method
1143 m, l = list(bids.items())[0]
1152 m, l = list(bids.items())[0]
1144 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1153 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1145 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1154 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1146 mresult.addfile(f, *l[0])
1155 mresult.addfile(f, *l[0])
1147 continue
1156 continue
1148 # If keep is an option, just do it.
1157 # If keep is an option, just do it.
1149 if mergestatemod.ACTION_KEEP in bids:
1158 if mergestatemod.ACTION_KEEP in bids:
1150 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1159 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1151 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1160 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1152 continue
1161 continue
1153 # If there are gets and they all agree [how could they not?], do it.
1162 # If there are gets and they all agree [how could they not?], do it.
1154 if mergestatemod.ACTION_GET in bids:
1163 if mergestatemod.ACTION_GET in bids:
1155 ga0 = bids[mergestatemod.ACTION_GET][0]
1164 ga0 = bids[mergestatemod.ACTION_GET][0]
1156 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1165 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1157 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1166 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1158 mresult.addfile(f, *ga0)
1167 mresult.addfile(f, *ga0)
1159 continue
1168 continue
1160 # TODO: Consider other simple actions such as mode changes
1169 # TODO: Consider other simple actions such as mode changes
1161 # Handle inefficient democrazy.
1170 # Handle inefficient democrazy.
1162 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1171 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1163 for m, l in sorted(bids.items()):
1172 for m, l in sorted(bids.items()):
1164 for _f, args, msg in l:
1173 for _f, args, msg in l:
1165 repo.ui.note(b' %s -> %s\n' % (msg, m))
1174 repo.ui.note(b' %s -> %s\n' % (msg, m))
1166 # Pick random action. TODO: Instead, prompt user when resolving
1175 # Pick random action. TODO: Instead, prompt user when resolving
1167 m, l = list(bids.items())[0]
1176 m, l = list(bids.items())[0]
1168 repo.ui.warn(
1177 repo.ui.warn(
1169 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1178 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1170 )
1179 )
1171 mresult.addfile(f, *l[0])
1180 mresult.addfile(f, *l[0])
1172 continue
1181 continue
1173 repo.ui.note(_(b'end of auction\n\n'))
1182 repo.ui.note(_(b'end of auction\n\n'))
1174 # TODO: think about commitinfo when bid merge is used
1183 # TODO: think about commitinfo when bid merge is used
1175 mresult.updatevalues(diverge, renamedelete, {})
1184 mresult.updatevalues(diverge, renamedelete, {})
1176
1185
1177 if wctx.rev() is None:
1186 if wctx.rev() is None:
1178 fractions = _forgetremoved(wctx, mctx, branchmerge)
1187 fractions = _forgetremoved(wctx, mctx, branchmerge)
1179 mresult.updateactions(fractions)
1188 mresult.updateactions(fractions)
1180
1189
1181 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1190 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1182 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1191 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1183
1192
1184 return mresult
1193 return mresult
1185
1194
1186
1195
1187 def _getcwd():
1196 def _getcwd():
1188 try:
1197 try:
1189 return encoding.getcwd()
1198 return encoding.getcwd()
1190 except OSError as err:
1199 except OSError as err:
1191 if err.errno == errno.ENOENT:
1200 if err.errno == errno.ENOENT:
1192 return None
1201 return None
1193 raise
1202 raise
1194
1203
1195
1204
1196 def batchremove(repo, wctx, actions):
1205 def batchremove(repo, wctx, actions):
1197 """apply removes to the working directory
1206 """apply removes to the working directory
1198
1207
1199 yields tuples for progress updates
1208 yields tuples for progress updates
1200 """
1209 """
1201 verbose = repo.ui.verbose
1210 verbose = repo.ui.verbose
1202 cwd = _getcwd()
1211 cwd = _getcwd()
1203 i = 0
1212 i = 0
1204 for f, args, msg in actions:
1213 for f, args, msg in actions:
1205 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1214 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1206 if verbose:
1215 if verbose:
1207 repo.ui.note(_(b"removing %s\n") % f)
1216 repo.ui.note(_(b"removing %s\n") % f)
1208 wctx[f].audit()
1217 wctx[f].audit()
1209 try:
1218 try:
1210 wctx[f].remove(ignoremissing=True)
1219 wctx[f].remove(ignoremissing=True)
1211 except OSError as inst:
1220 except OSError as inst:
1212 repo.ui.warn(
1221 repo.ui.warn(
1213 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1222 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1214 )
1223 )
1215 if i == 100:
1224 if i == 100:
1216 yield i, f
1225 yield i, f
1217 i = 0
1226 i = 0
1218 i += 1
1227 i += 1
1219 if i > 0:
1228 if i > 0:
1220 yield i, f
1229 yield i, f
1221
1230
1222 if cwd and not _getcwd():
1231 if cwd and not _getcwd():
1223 # cwd was removed in the course of removing files; print a helpful
1232 # cwd was removed in the course of removing files; print a helpful
1224 # warning.
1233 # warning.
1225 repo.ui.warn(
1234 repo.ui.warn(
1226 _(
1235 _(
1227 b"current directory was removed\n"
1236 b"current directory was removed\n"
1228 b"(consider changing to repo root: %s)\n"
1237 b"(consider changing to repo root: %s)\n"
1229 )
1238 )
1230 % repo.root
1239 % repo.root
1231 )
1240 )
1232
1241
1233
1242
1234 def batchget(repo, mctx, wctx, wantfiledata, actions):
1243 def batchget(repo, mctx, wctx, wantfiledata, actions):
1235 """apply gets to the working directory
1244 """apply gets to the working directory
1236
1245
1237 mctx is the context to get from
1246 mctx is the context to get from
1238
1247
1239 Yields arbitrarily many (False, tuple) for progress updates, followed by
1248 Yields arbitrarily many (False, tuple) for progress updates, followed by
1240 exactly one (True, filedata). When wantfiledata is false, filedata is an
1249 exactly one (True, filedata). When wantfiledata is false, filedata is an
1241 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1250 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1242 mtime) of the file f written for each action.
1251 mtime) of the file f written for each action.
1243 """
1252 """
1244 filedata = {}
1253 filedata = {}
1245 verbose = repo.ui.verbose
1254 verbose = repo.ui.verbose
1246 fctx = mctx.filectx
1255 fctx = mctx.filectx
1247 ui = repo.ui
1256 ui = repo.ui
1248 i = 0
1257 i = 0
1249 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1258 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1250 for f, (flags, backup), msg in actions:
1259 for f, (flags, backup), msg in actions:
1251 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1260 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1252 if verbose:
1261 if verbose:
1253 repo.ui.note(_(b"getting %s\n") % f)
1262 repo.ui.note(_(b"getting %s\n") % f)
1254
1263
1255 if backup:
1264 if backup:
1256 # If a file or directory exists with the same name, back that
1265 # If a file or directory exists with the same name, back that
1257 # up. Otherwise, look to see if there is a file that conflicts
1266 # up. Otherwise, look to see if there is a file that conflicts
1258 # with a directory this file is in, and if so, back that up.
1267 # with a directory this file is in, and if so, back that up.
1259 conflicting = f
1268 conflicting = f
1260 if not repo.wvfs.lexists(f):
1269 if not repo.wvfs.lexists(f):
1261 for p in pathutil.finddirs(f):
1270 for p in pathutil.finddirs(f):
1262 if repo.wvfs.isfileorlink(p):
1271 if repo.wvfs.isfileorlink(p):
1263 conflicting = p
1272 conflicting = p
1264 break
1273 break
1265 if repo.wvfs.lexists(conflicting):
1274 if repo.wvfs.lexists(conflicting):
1266 orig = scmutil.backuppath(ui, repo, conflicting)
1275 orig = scmutil.backuppath(ui, repo, conflicting)
1267 util.rename(repo.wjoin(conflicting), orig)
1276 util.rename(repo.wjoin(conflicting), orig)
1268 wfctx = wctx[f]
1277 wfctx = wctx[f]
1269 wfctx.clearunknown()
1278 wfctx.clearunknown()
1270 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1279 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1271 size = wfctx.write(
1280 size = wfctx.write(
1272 fctx(f).data(),
1281 fctx(f).data(),
1273 flags,
1282 flags,
1274 backgroundclose=True,
1283 backgroundclose=True,
1275 atomictemp=atomictemp,
1284 atomictemp=atomictemp,
1276 )
1285 )
1277 if wantfiledata:
1286 if wantfiledata:
1278 s = wfctx.lstat()
1287 s = wfctx.lstat()
1279 mode = s.st_mode
1288 mode = s.st_mode
1280 mtime = s[stat.ST_MTIME]
1289 mtime = s[stat.ST_MTIME]
1281 filedata[f] = (mode, size, mtime) # for dirstate.normal
1290 filedata[f] = (mode, size, mtime) # for dirstate.normal
1282 if i == 100:
1291 if i == 100:
1283 yield False, (i, f)
1292 yield False, (i, f)
1284 i = 0
1293 i = 0
1285 i += 1
1294 i += 1
1286 if i > 0:
1295 if i > 0:
1287 yield False, (i, f)
1296 yield False, (i, f)
1288 yield True, filedata
1297 yield True, filedata
1289
1298
1290
1299
1291 def _prefetchfiles(repo, ctx, mresult):
1300 def _prefetchfiles(repo, ctx, mresult):
1292 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1301 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1293 of merge actions. ``ctx`` is the context being merged in."""
1302 of merge actions. ``ctx`` is the context being merged in."""
1294
1303
1295 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1304 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1296 # don't touch the context to be merged in. 'cd' is skipped, because
1305 # don't touch the context to be merged in. 'cd' is skipped, because
1297 # changed/deleted never resolves to something from the remote side.
1306 # changed/deleted never resolves to something from the remote side.
1298 files = []
1307 files = []
1299 for f, args, msg in mresult.getactions(
1308 for f, args, msg in mresult.getactions(
1300 [
1309 [
1301 mergestatemod.ACTION_GET,
1310 mergestatemod.ACTION_GET,
1302 mergestatemod.ACTION_DELETED_CHANGED,
1311 mergestatemod.ACTION_DELETED_CHANGED,
1303 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1312 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1304 mergestatemod.ACTION_MERGE,
1313 mergestatemod.ACTION_MERGE,
1305 ]
1314 ]
1306 ):
1315 ):
1307 files.append(f)
1316 files.append(f)
1308
1317
1309 prefetch = scmutil.prefetchfiles
1318 prefetch = scmutil.prefetchfiles
1310 matchfiles = scmutil.matchfiles
1319 matchfiles = scmutil.matchfiles
1311 prefetch(
1320 prefetch(
1312 repo, [(ctx.rev(), matchfiles(repo, files),)],
1321 repo, [(ctx.rev(), matchfiles(repo, files),)],
1313 )
1322 )
1314
1323
1315
1324
1316 @attr.s(frozen=True)
1325 @attr.s(frozen=True)
1317 class updateresult(object):
1326 class updateresult(object):
1318 updatedcount = attr.ib()
1327 updatedcount = attr.ib()
1319 mergedcount = attr.ib()
1328 mergedcount = attr.ib()
1320 removedcount = attr.ib()
1329 removedcount = attr.ib()
1321 unresolvedcount = attr.ib()
1330 unresolvedcount = attr.ib()
1322
1331
1323 def isempty(self):
1332 def isempty(self):
1324 return not (
1333 return not (
1325 self.updatedcount
1334 self.updatedcount
1326 or self.mergedcount
1335 or self.mergedcount
1327 or self.removedcount
1336 or self.removedcount
1328 or self.unresolvedcount
1337 or self.unresolvedcount
1329 )
1338 )
1330
1339
1331
1340
1332 def emptyactions():
1341 def emptyactions():
1333 """create an actions dict, to be populated and passed to applyupdates()"""
1342 """create an actions dict, to be populated and passed to applyupdates()"""
1334 return {
1343 return {
1335 m: []
1344 m: []
1336 for m in (
1345 for m in (
1337 mergestatemod.ACTION_ADD,
1346 mergestatemod.ACTION_ADD,
1338 mergestatemod.ACTION_ADD_MODIFIED,
1347 mergestatemod.ACTION_ADD_MODIFIED,
1339 mergestatemod.ACTION_FORGET,
1348 mergestatemod.ACTION_FORGET,
1340 mergestatemod.ACTION_GET,
1349 mergestatemod.ACTION_GET,
1341 mergestatemod.ACTION_CHANGED_DELETED,
1350 mergestatemod.ACTION_CHANGED_DELETED,
1342 mergestatemod.ACTION_DELETED_CHANGED,
1351 mergestatemod.ACTION_DELETED_CHANGED,
1343 mergestatemod.ACTION_REMOVE,
1352 mergestatemod.ACTION_REMOVE,
1344 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1353 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1345 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1354 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1346 mergestatemod.ACTION_MERGE,
1355 mergestatemod.ACTION_MERGE,
1347 mergestatemod.ACTION_EXEC,
1356 mergestatemod.ACTION_EXEC,
1348 mergestatemod.ACTION_KEEP,
1357 mergestatemod.ACTION_KEEP,
1349 mergestatemod.ACTION_PATH_CONFLICT,
1358 mergestatemod.ACTION_PATH_CONFLICT,
1350 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1359 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1351 )
1360 )
1352 }
1361 }
1353
1362
1354
1363
1355 def applyupdates(
1364 def applyupdates(
1356 repo,
1365 repo,
1357 mresult,
1366 mresult,
1358 wctx,
1367 wctx,
1359 mctx,
1368 mctx,
1360 overwrite,
1369 overwrite,
1361 wantfiledata,
1370 wantfiledata,
1362 labels=None,
1371 labels=None,
1363 commitinfo=None,
1372 commitinfo=None,
1364 ):
1373 ):
1365 """apply the merge action list to the working directory
1374 """apply the merge action list to the working directory
1366
1375
1367 mresult is a mergeresult object representing result of the merge
1376 mresult is a mergeresult object representing result of the merge
1368 wctx is the working copy context
1377 wctx is the working copy context
1369 mctx is the context to be merged into the working copy
1378 mctx is the context to be merged into the working copy
1370 commitinfo is a mapping of information which needs to be stored somewhere
1379 commitinfo is a mapping of information which needs to be stored somewhere
1371 (probably mergestate) so that it can be used at commit time.
1380 (probably mergestate) so that it can be used at commit time.
1372
1381
1373 Return a tuple of (counts, filedata), where counts is a tuple
1382 Return a tuple of (counts, filedata), where counts is a tuple
1374 (updated, merged, removed, unresolved) that describes how many
1383 (updated, merged, removed, unresolved) that describes how many
1375 files were affected by the update, and filedata is as described in
1384 files were affected by the update, and filedata is as described in
1376 batchget.
1385 batchget.
1377 """
1386 """
1378
1387
1379 _prefetchfiles(repo, mctx, mresult)
1388 _prefetchfiles(repo, mctx, mresult)
1380
1389
1381 updated, merged, removed = 0, 0, 0
1390 updated, merged, removed = 0, 0, 0
1382 ms = mergestatemod.mergestate.clean(
1391 ms = mergestatemod.mergestate.clean(
1383 repo, wctx.p1().node(), mctx.node(), labels
1392 repo, wctx.p1().node(), mctx.node(), labels
1384 )
1393 )
1385
1394
1386 if commitinfo is None:
1395 if commitinfo is None:
1387 commitinfo = {}
1396 commitinfo = {}
1388
1397
1389 for f, op in pycompat.iteritems(commitinfo):
1398 for f, op in pycompat.iteritems(commitinfo):
1390 # the other side of filenode was choosen while merging, store this in
1399 # the other side of filenode was choosen while merging, store this in
1391 # mergestate so that it can be reused on commit
1400 # mergestate so that it can be reused on commit
1392 if op == b'other':
1401 if op == b'other':
1393 ms.addmergedother(f)
1402 ms.addmergedother(f)
1394
1403
1395 moves = []
1404 moves = []
1396
1405
1397 # 'cd' and 'dc' actions are treated like other merge conflicts
1406 # 'cd' and 'dc' actions are treated like other merge conflicts
1398 mergeactions = list(
1407 mergeactions = list(
1399 mresult.getactions(
1408 mresult.getactions(
1400 [
1409 [
1401 mergestatemod.ACTION_CHANGED_DELETED,
1410 mergestatemod.ACTION_CHANGED_DELETED,
1402 mergestatemod.ACTION_DELETED_CHANGED,
1411 mergestatemod.ACTION_DELETED_CHANGED,
1403 mergestatemod.ACTION_MERGE,
1412 mergestatemod.ACTION_MERGE,
1404 ],
1413 ],
1405 sort=True,
1414 sort=True,
1406 )
1415 )
1407 )
1416 )
1408 for f, args, msg in mergeactions:
1417 for f, args, msg in mergeactions:
1409 f1, f2, fa, move, anc = args
1418 f1, f2, fa, move, anc = args
1410 if f == b'.hgsubstate': # merged internally
1419 if f == b'.hgsubstate': # merged internally
1411 continue
1420 continue
1412 if f1 is None:
1421 if f1 is None:
1413 fcl = filemerge.absentfilectx(wctx, fa)
1422 fcl = filemerge.absentfilectx(wctx, fa)
1414 else:
1423 else:
1415 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1424 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1416 fcl = wctx[f1]
1425 fcl = wctx[f1]
1417 if f2 is None:
1426 if f2 is None:
1418 fco = filemerge.absentfilectx(mctx, fa)
1427 fco = filemerge.absentfilectx(mctx, fa)
1419 else:
1428 else:
1420 fco = mctx[f2]
1429 fco = mctx[f2]
1421 actx = repo[anc]
1430 actx = repo[anc]
1422 if fa in actx:
1431 if fa in actx:
1423 fca = actx[fa]
1432 fca = actx[fa]
1424 else:
1433 else:
1425 # TODO: move to absentfilectx
1434 # TODO: move to absentfilectx
1426 fca = repo.filectx(f1, fileid=nullrev)
1435 fca = repo.filectx(f1, fileid=nullrev)
1427 ms.add(fcl, fco, fca, f)
1436 ms.add(fcl, fco, fca, f)
1428 if f1 != f and move:
1437 if f1 != f and move:
1429 moves.append(f1)
1438 moves.append(f1)
1430
1439
1431 # remove renamed files after safely stored
1440 # remove renamed files after safely stored
1432 for f in moves:
1441 for f in moves:
1433 if wctx[f].lexists():
1442 if wctx[f].lexists():
1434 repo.ui.debug(b"removing %s\n" % f)
1443 repo.ui.debug(b"removing %s\n" % f)
1435 wctx[f].audit()
1444 wctx[f].audit()
1436 wctx[f].remove()
1445 wctx[f].remove()
1437
1446
1438 numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
1447 numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
1439 progress = repo.ui.makeprogress(
1448 progress = repo.ui.makeprogress(
1440 _(b'updating'), unit=_(b'files'), total=numupdates
1449 _(b'updating'), unit=_(b'files'), total=numupdates
1441 )
1450 )
1442
1451
1443 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1452 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1444 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1453 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1445
1454
1446 # record path conflicts
1455 # record path conflicts
1447 for f, args, msg in mresult.getactions(
1456 for f, args, msg in mresult.getactions(
1448 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1457 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1449 ):
1458 ):
1450 f1, fo = args
1459 f1, fo = args
1451 s = repo.ui.status
1460 s = repo.ui.status
1452 s(
1461 s(
1453 _(
1462 _(
1454 b"%s: path conflict - a file or link has the same name as a "
1463 b"%s: path conflict - a file or link has the same name as a "
1455 b"directory\n"
1464 b"directory\n"
1456 )
1465 )
1457 % f
1466 % f
1458 )
1467 )
1459 if fo == b'l':
1468 if fo == b'l':
1460 s(_(b"the local file has been renamed to %s\n") % f1)
1469 s(_(b"the local file has been renamed to %s\n") % f1)
1461 else:
1470 else:
1462 s(_(b"the remote file has been renamed to %s\n") % f1)
1471 s(_(b"the remote file has been renamed to %s\n") % f1)
1463 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1472 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1464 ms.addpathconflict(f, f1, fo)
1473 ms.addpathconflict(f, f1, fo)
1465 progress.increment(item=f)
1474 progress.increment(item=f)
1466
1475
1467 # When merging in-memory, we can't support worker processes, so set the
1476 # When merging in-memory, we can't support worker processes, so set the
1468 # per-item cost at 0 in that case.
1477 # per-item cost at 0 in that case.
1469 cost = 0 if wctx.isinmemory() else 0.001
1478 cost = 0 if wctx.isinmemory() else 0.001
1470
1479
1471 # remove in parallel (must come before resolving path conflicts and getting)
1480 # remove in parallel (must come before resolving path conflicts and getting)
1472 prog = worker.worker(
1481 prog = worker.worker(
1473 repo.ui,
1482 repo.ui,
1474 cost,
1483 cost,
1475 batchremove,
1484 batchremove,
1476 (repo, wctx),
1485 (repo, wctx),
1477 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1486 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1478 )
1487 )
1479 for i, item in prog:
1488 for i, item in prog:
1480 progress.increment(step=i, item=item)
1489 progress.increment(step=i, item=item)
1481 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1490 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1482
1491
1483 # resolve path conflicts (must come before getting)
1492 # resolve path conflicts (must come before getting)
1484 for f, args, msg in mresult.getactions(
1493 for f, args, msg in mresult.getactions(
1485 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1494 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1486 ):
1495 ):
1487 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1496 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1488 (f0, origf0) = args
1497 (f0, origf0) = args
1489 if wctx[f0].lexists():
1498 if wctx[f0].lexists():
1490 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1499 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1491 wctx[f].audit()
1500 wctx[f].audit()
1492 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1501 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1493 wctx[f0].remove()
1502 wctx[f0].remove()
1494 progress.increment(item=f)
1503 progress.increment(item=f)
1495
1504
1496 # get in parallel.
1505 # get in parallel.
1497 threadsafe = repo.ui.configbool(
1506 threadsafe = repo.ui.configbool(
1498 b'experimental', b'worker.wdir-get-thread-safe'
1507 b'experimental', b'worker.wdir-get-thread-safe'
1499 )
1508 )
1500 prog = worker.worker(
1509 prog = worker.worker(
1501 repo.ui,
1510 repo.ui,
1502 cost,
1511 cost,
1503 batchget,
1512 batchget,
1504 (repo, mctx, wctx, wantfiledata),
1513 (repo, mctx, wctx, wantfiledata),
1505 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1514 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1506 threadsafe=threadsafe,
1515 threadsafe=threadsafe,
1507 hasretval=True,
1516 hasretval=True,
1508 )
1517 )
1509 getfiledata = {}
1518 getfiledata = {}
1510 for final, res in prog:
1519 for final, res in prog:
1511 if final:
1520 if final:
1512 getfiledata = res
1521 getfiledata = res
1513 else:
1522 else:
1514 i, item = res
1523 i, item = res
1515 progress.increment(step=i, item=item)
1524 progress.increment(step=i, item=item)
1516
1525
1517 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1526 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1518 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1527 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1519
1528
1520 # forget (manifest only, just log it) (must come first)
1529 # forget (manifest only, just log it) (must come first)
1521 for f, args, msg in mresult.getactions(
1530 for f, args, msg in mresult.getactions(
1522 (mergestatemod.ACTION_FORGET,), sort=True
1531 (mergestatemod.ACTION_FORGET,), sort=True
1523 ):
1532 ):
1524 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1533 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1525 progress.increment(item=f)
1534 progress.increment(item=f)
1526
1535
1527 # re-add (manifest only, just log it)
1536 # re-add (manifest only, just log it)
1528 for f, args, msg in mresult.getactions(
1537 for f, args, msg in mresult.getactions(
1529 (mergestatemod.ACTION_ADD,), sort=True
1538 (mergestatemod.ACTION_ADD,), sort=True
1530 ):
1539 ):
1531 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1540 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1532 progress.increment(item=f)
1541 progress.increment(item=f)
1533
1542
1534 # re-add/mark as modified (manifest only, just log it)
1543 # re-add/mark as modified (manifest only, just log it)
1535 for f, args, msg in mresult.getactions(
1544 for f, args, msg in mresult.getactions(
1536 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1545 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1537 ):
1546 ):
1538 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1547 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1539 progress.increment(item=f)
1548 progress.increment(item=f)
1540
1549
1541 # keep (noop, just log it)
1550 # keep (noop, just log it)
1542 for f, args, msg in mresult.getactions(
1551 for f, args, msg in mresult.getactions(
1543 (mergestatemod.ACTION_KEEP,), sort=True
1552 (mergestatemod.ACTION_KEEP,), sort=True
1544 ):
1553 ):
1545 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1554 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1546 # no progress
1555 # no progress
1547
1556
1548 # directory rename, move local
1557 # directory rename, move local
1549 for f, args, msg in mresult.getactions(
1558 for f, args, msg in mresult.getactions(
1550 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1559 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1551 ):
1560 ):
1552 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1561 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1553 progress.increment(item=f)
1562 progress.increment(item=f)
1554 f0, flags = args
1563 f0, flags = args
1555 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1564 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1556 wctx[f].audit()
1565 wctx[f].audit()
1557 wctx[f].write(wctx.filectx(f0).data(), flags)
1566 wctx[f].write(wctx.filectx(f0).data(), flags)
1558 wctx[f0].remove()
1567 wctx[f0].remove()
1559
1568
1560 # local directory rename, get
1569 # local directory rename, get
1561 for f, args, msg in mresult.getactions(
1570 for f, args, msg in mresult.getactions(
1562 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1571 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1563 ):
1572 ):
1564 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1573 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1565 progress.increment(item=f)
1574 progress.increment(item=f)
1566 f0, flags = args
1575 f0, flags = args
1567 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1576 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1568 wctx[f].write(mctx.filectx(f0).data(), flags)
1577 wctx[f].write(mctx.filectx(f0).data(), flags)
1569
1578
1570 # exec
1579 # exec
1571 for f, args, msg in mresult.getactions(
1580 for f, args, msg in mresult.getactions(
1572 (mergestatemod.ACTION_EXEC,), sort=True
1581 (mergestatemod.ACTION_EXEC,), sort=True
1573 ):
1582 ):
1574 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1583 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1575 progress.increment(item=f)
1584 progress.increment(item=f)
1576 (flags,) = args
1585 (flags,) = args
1577 wctx[f].audit()
1586 wctx[f].audit()
1578 wctx[f].setflags(b'l' in flags, b'x' in flags)
1587 wctx[f].setflags(b'l' in flags, b'x' in flags)
1579
1588
1580 # these actions updates the file
1589 # these actions updates the file
1581 updated = mresult.len(
1590 updated = mresult.len(
1582 (
1591 (
1583 mergestatemod.ACTION_GET,
1592 mergestatemod.ACTION_GET,
1584 mergestatemod.ACTION_EXEC,
1593 mergestatemod.ACTION_EXEC,
1585 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1594 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1586 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1595 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1587 )
1596 )
1588 )
1597 )
1589 # the ordering is important here -- ms.mergedriver will raise if the merge
1598 # the ordering is important here -- ms.mergedriver will raise if the merge
1590 # driver has changed, and we want to be able to bypass it when overwrite is
1599 # driver has changed, and we want to be able to bypass it when overwrite is
1591 # True
1600 # True
1592 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1601 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1593
1602
1594 if usemergedriver:
1603 if usemergedriver:
1595 if wctx.isinmemory():
1604 if wctx.isinmemory():
1596 raise error.InMemoryMergeConflictsError(
1605 raise error.InMemoryMergeConflictsError(
1597 b"in-memory merge does not support mergedriver"
1606 b"in-memory merge does not support mergedriver"
1598 )
1607 )
1599 ms.commit()
1608 ms.commit()
1600 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1609 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1601 # the driver might leave some files unresolved
1610 # the driver might leave some files unresolved
1602 unresolvedf = set(ms.unresolved())
1611 unresolvedf = set(ms.unresolved())
1603 if not proceed:
1612 if not proceed:
1604 # XXX setting unresolved to at least 1 is a hack to make sure we
1613 # XXX setting unresolved to at least 1 is a hack to make sure we
1605 # error out
1614 # error out
1606 return updateresult(
1615 return updateresult(
1607 updated, merged, removed, max(len(unresolvedf), 1)
1616 updated, merged, removed, max(len(unresolvedf), 1)
1608 )
1617 )
1609 newactions = []
1618 newactions = []
1610 for f, args, msg in mergeactions:
1619 for f, args, msg in mergeactions:
1611 if f in unresolvedf:
1620 if f in unresolvedf:
1612 newactions.append((f, args, msg))
1621 newactions.append((f, args, msg))
1613 mergeactions = newactions
1622 mergeactions = newactions
1614
1623
1615 try:
1624 try:
1616 # premerge
1625 # premerge
1617 tocomplete = []
1626 tocomplete = []
1618 for f, args, msg in mergeactions:
1627 for f, args, msg in mergeactions:
1619 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1628 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1620 progress.increment(item=f)
1629 progress.increment(item=f)
1621 if f == b'.hgsubstate': # subrepo states need updating
1630 if f == b'.hgsubstate': # subrepo states need updating
1622 subrepoutil.submerge(
1631 subrepoutil.submerge(
1623 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1632 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1624 )
1633 )
1625 continue
1634 continue
1626 wctx[f].audit()
1635 wctx[f].audit()
1627 complete, r = ms.preresolve(f, wctx)
1636 complete, r = ms.preresolve(f, wctx)
1628 if not complete:
1637 if not complete:
1629 numupdates += 1
1638 numupdates += 1
1630 tocomplete.append((f, args, msg))
1639 tocomplete.append((f, args, msg))
1631
1640
1632 # merge
1641 # merge
1633 for f, args, msg in tocomplete:
1642 for f, args, msg in tocomplete:
1634 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1643 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1635 progress.increment(item=f, total=numupdates)
1644 progress.increment(item=f, total=numupdates)
1636 ms.resolve(f, wctx)
1645 ms.resolve(f, wctx)
1637
1646
1638 finally:
1647 finally:
1639 ms.commit()
1648 ms.commit()
1640
1649
1641 unresolved = ms.unresolvedcount()
1650 unresolved = ms.unresolvedcount()
1642
1651
1643 if (
1652 if (
1644 usemergedriver
1653 usemergedriver
1645 and not unresolved
1654 and not unresolved
1646 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1655 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1647 ):
1656 ):
1648 if not driverconclude(repo, ms, wctx, labels=labels):
1657 if not driverconclude(repo, ms, wctx, labels=labels):
1649 # XXX setting unresolved to at least 1 is a hack to make sure we
1658 # XXX setting unresolved to at least 1 is a hack to make sure we
1650 # error out
1659 # error out
1651 unresolved = max(unresolved, 1)
1660 unresolved = max(unresolved, 1)
1652
1661
1653 ms.commit()
1662 ms.commit()
1654
1663
1655 msupdated, msmerged, msremoved = ms.counts()
1664 msupdated, msmerged, msremoved = ms.counts()
1656 updated += msupdated
1665 updated += msupdated
1657 merged += msmerged
1666 merged += msmerged
1658 removed += msremoved
1667 removed += msremoved
1659
1668
1660 extraactions = ms.actions()
1669 extraactions = ms.actions()
1661 if extraactions:
1670 if extraactions:
1662 mfiles = {
1671 mfiles = {
1663 a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
1672 a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
1664 }
1673 }
1665 for k, acts in pycompat.iteritems(extraactions):
1674 for k, acts in pycompat.iteritems(extraactions):
1666 for a in acts:
1675 for a in acts:
1667 mresult.addfile(a[0], k, *a[1:])
1676 mresult.addfile(a[0], k, *a[1:])
1668 if k == mergestatemod.ACTION_GET and wantfiledata:
1677 if k == mergestatemod.ACTION_GET and wantfiledata:
1669 # no filedata until mergestate is updated to provide it
1678 # no filedata until mergestate is updated to provide it
1670 for a in acts:
1679 for a in acts:
1671 getfiledata[a[0]] = None
1680 getfiledata[a[0]] = None
1672 # Remove these files from actions[ACTION_MERGE] as well. This is
1681 # Remove these files from actions[ACTION_MERGE] as well. This is
1673 # important because in recordupdates, files in actions[ACTION_MERGE]
1682 # important because in recordupdates, files in actions[ACTION_MERGE]
1674 # are processed after files in other actions, and the merge driver
1683 # are processed after files in other actions, and the merge driver
1675 # might add files to those actions via extraactions above. This can
1684 # might add files to those actions via extraactions above. This can
1676 # lead to a file being recorded twice, with poor results. This is
1685 # lead to a file being recorded twice, with poor results. This is
1677 # especially problematic for actions[ACTION_REMOVE] (currently only
1686 # especially problematic for actions[ACTION_REMOVE] (currently only
1678 # possible with the merge driver in the initial merge process;
1687 # possible with the merge driver in the initial merge process;
1679 # interrupted merges don't go through this flow).
1688 # interrupted merges don't go through this flow).
1680 #
1689 #
1681 # The real fix here is to have indexes by both file and action so
1690 # The real fix here is to have indexes by both file and action so
1682 # that when the action for a file is changed it is automatically
1691 # that when the action for a file is changed it is automatically
1683 # reflected in the other action lists. But that involves a more
1692 # reflected in the other action lists. But that involves a more
1684 # complex data structure, so this will do for now.
1693 # complex data structure, so this will do for now.
1685 #
1694 #
1686 # We don't need to do the same operation for 'dc' and 'cd' because
1695 # We don't need to do the same operation for 'dc' and 'cd' because
1687 # those lists aren't consulted again.
1696 # those lists aren't consulted again.
1688 mfiles.difference_update(a[0] for a in acts)
1697 mfiles.difference_update(a[0] for a in acts)
1689
1698
1690 for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
1699 for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
1691 if a[0] not in mfiles:
1700 if a[0] not in mfiles:
1692 mresult.removefile(a[0])
1701 mresult.removefile(a[0])
1693
1702
1694 progress.complete()
1703 progress.complete()
1695 assert len(getfiledata) == (
1704 assert len(getfiledata) == (
1696 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1705 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1697 )
1706 )
1698 return updateresult(updated, merged, removed, unresolved), getfiledata
1707 return updateresult(updated, merged, removed, unresolved), getfiledata
1699
1708
1700
1709
1701 def _advertisefsmonitor(repo, num_gets, p1node):
1710 def _advertisefsmonitor(repo, num_gets, p1node):
1702 # Advertise fsmonitor when its presence could be useful.
1711 # Advertise fsmonitor when its presence could be useful.
1703 #
1712 #
1704 # We only advertise when performing an update from an empty working
1713 # We only advertise when performing an update from an empty working
1705 # directory. This typically only occurs during initial clone.
1714 # directory. This typically only occurs during initial clone.
1706 #
1715 #
1707 # We give users a mechanism to disable the warning in case it is
1716 # We give users a mechanism to disable the warning in case it is
1708 # annoying.
1717 # annoying.
1709 #
1718 #
1710 # We only allow on Linux and MacOS because that's where fsmonitor is
1719 # We only allow on Linux and MacOS because that's where fsmonitor is
1711 # considered stable.
1720 # considered stable.
1712 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1721 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1713 fsmonitorthreshold = repo.ui.configint(
1722 fsmonitorthreshold = repo.ui.configint(
1714 b'fsmonitor', b'warn_update_file_count'
1723 b'fsmonitor', b'warn_update_file_count'
1715 )
1724 )
1716 try:
1725 try:
1717 # avoid cycle: extensions -> cmdutil -> merge
1726 # avoid cycle: extensions -> cmdutil -> merge
1718 from . import extensions
1727 from . import extensions
1719
1728
1720 extensions.find(b'fsmonitor')
1729 extensions.find(b'fsmonitor')
1721 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1730 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1722 # We intentionally don't look at whether fsmonitor has disabled
1731 # We intentionally don't look at whether fsmonitor has disabled
1723 # itself because a) fsmonitor may have already printed a warning
1732 # itself because a) fsmonitor may have already printed a warning
1724 # b) we only care about the config state here.
1733 # b) we only care about the config state here.
1725 except KeyError:
1734 except KeyError:
1726 fsmonitorenabled = False
1735 fsmonitorenabled = False
1727
1736
1728 if (
1737 if (
1729 fsmonitorwarning
1738 fsmonitorwarning
1730 and not fsmonitorenabled
1739 and not fsmonitorenabled
1731 and p1node == nullid
1740 and p1node == nullid
1732 and num_gets >= fsmonitorthreshold
1741 and num_gets >= fsmonitorthreshold
1733 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1742 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1734 ):
1743 ):
1735 repo.ui.warn(
1744 repo.ui.warn(
1736 _(
1745 _(
1737 b'(warning: large working directory being used without '
1746 b'(warning: large working directory being used without '
1738 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1747 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1739 b'see "hg help -e fsmonitor")\n'
1748 b'see "hg help -e fsmonitor")\n'
1740 )
1749 )
1741 )
1750 )
1742
1751
1743
1752
1744 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1753 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1745 UPDATECHECK_NONE = b'none'
1754 UPDATECHECK_NONE = b'none'
1746 UPDATECHECK_LINEAR = b'linear'
1755 UPDATECHECK_LINEAR = b'linear'
1747 UPDATECHECK_NO_CONFLICT = b'noconflict'
1756 UPDATECHECK_NO_CONFLICT = b'noconflict'
1748
1757
1749
1758
1750 def update(
1759 def update(
1751 repo,
1760 repo,
1752 node,
1761 node,
1753 branchmerge,
1762 branchmerge,
1754 force,
1763 force,
1755 ancestor=None,
1764 ancestor=None,
1756 mergeancestor=False,
1765 mergeancestor=False,
1757 labels=None,
1766 labels=None,
1758 matcher=None,
1767 matcher=None,
1759 mergeforce=False,
1768 mergeforce=False,
1760 updatedirstate=True,
1769 updatedirstate=True,
1761 updatecheck=None,
1770 updatecheck=None,
1762 wc=None,
1771 wc=None,
1763 ):
1772 ):
1764 """
1773 """
1765 Perform a merge between the working directory and the given node
1774 Perform a merge between the working directory and the given node
1766
1775
1767 node = the node to update to
1776 node = the node to update to
1768 branchmerge = whether to merge between branches
1777 branchmerge = whether to merge between branches
1769 force = whether to force branch merging or file overwriting
1778 force = whether to force branch merging or file overwriting
1770 matcher = a matcher to filter file lists (dirstate not updated)
1779 matcher = a matcher to filter file lists (dirstate not updated)
1771 mergeancestor = whether it is merging with an ancestor. If true,
1780 mergeancestor = whether it is merging with an ancestor. If true,
1772 we should accept the incoming changes for any prompts that occur.
1781 we should accept the incoming changes for any prompts that occur.
1773 If false, merging with an ancestor (fast-forward) is only allowed
1782 If false, merging with an ancestor (fast-forward) is only allowed
1774 between different named branches. This flag is used by rebase extension
1783 between different named branches. This flag is used by rebase extension
1775 as a temporary fix and should be avoided in general.
1784 as a temporary fix and should be avoided in general.
1776 labels = labels to use for base, local and other
1785 labels = labels to use for base, local and other
1777 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1786 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1778 this is True, then 'force' should be True as well.
1787 this is True, then 'force' should be True as well.
1779
1788
1780 The table below shows all the behaviors of the update command given the
1789 The table below shows all the behaviors of the update command given the
1781 -c/--check and -C/--clean or no options, whether the working directory is
1790 -c/--check and -C/--clean or no options, whether the working directory is
1782 dirty, whether a revision is specified, and the relationship of the parent
1791 dirty, whether a revision is specified, and the relationship of the parent
1783 rev to the target rev (linear or not). Match from top first. The -n
1792 rev to the target rev (linear or not). Match from top first. The -n
1784 option doesn't exist on the command line, but represents the
1793 option doesn't exist on the command line, but represents the
1785 experimental.updatecheck=noconflict option.
1794 experimental.updatecheck=noconflict option.
1786
1795
1787 This logic is tested by test-update-branches.t.
1796 This logic is tested by test-update-branches.t.
1788
1797
1789 -c -C -n -m dirty rev linear | result
1798 -c -C -n -m dirty rev linear | result
1790 y y * * * * * | (1)
1799 y y * * * * * | (1)
1791 y * y * * * * | (1)
1800 y * y * * * * | (1)
1792 y * * y * * * | (1)
1801 y * * y * * * | (1)
1793 * y y * * * * | (1)
1802 * y y * * * * | (1)
1794 * y * y * * * | (1)
1803 * y * y * * * | (1)
1795 * * y y * * * | (1)
1804 * * y y * * * | (1)
1796 * * * * * n n | x
1805 * * * * * n n | x
1797 * * * * n * * | ok
1806 * * * * n * * | ok
1798 n n n n y * y | merge
1807 n n n n y * y | merge
1799 n n n n y y n | (2)
1808 n n n n y y n | (2)
1800 n n n y y * * | merge
1809 n n n y y * * | merge
1801 n n y n y * * | merge if no conflict
1810 n n y n y * * | merge if no conflict
1802 n y n n y * * | discard
1811 n y n n y * * | discard
1803 y n n n y * * | (3)
1812 y n n n y * * | (3)
1804
1813
1805 x = can't happen
1814 x = can't happen
1806 * = don't-care
1815 * = don't-care
1807 1 = incompatible options (checked in commands.py)
1816 1 = incompatible options (checked in commands.py)
1808 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1817 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1809 3 = abort: uncommitted changes (checked in commands.py)
1818 3 = abort: uncommitted changes (checked in commands.py)
1810
1819
1811 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1820 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1812 to repo[None] if None is passed.
1821 to repo[None] if None is passed.
1813
1822
1814 Return the same tuple as applyupdates().
1823 Return the same tuple as applyupdates().
1815 """
1824 """
1816 # Avoid cycle.
1825 # Avoid cycle.
1817 from . import sparse
1826 from . import sparse
1818
1827
1819 # This function used to find the default destination if node was None, but
1828 # This function used to find the default destination if node was None, but
1820 # that's now in destutil.py.
1829 # that's now in destutil.py.
1821 assert node is not None
1830 assert node is not None
1822 if not branchmerge and not force:
1831 if not branchmerge and not force:
1823 # TODO: remove the default once all callers that pass branchmerge=False
1832 # TODO: remove the default once all callers that pass branchmerge=False
1824 # and force=False pass a value for updatecheck. We may want to allow
1833 # and force=False pass a value for updatecheck. We may want to allow
1825 # updatecheck='abort' to better suppport some of these callers.
1834 # updatecheck='abort' to better suppport some of these callers.
1826 if updatecheck is None:
1835 if updatecheck is None:
1827 updatecheck = UPDATECHECK_LINEAR
1836 updatecheck = UPDATECHECK_LINEAR
1828 if updatecheck not in (
1837 if updatecheck not in (
1829 UPDATECHECK_NONE,
1838 UPDATECHECK_NONE,
1830 UPDATECHECK_LINEAR,
1839 UPDATECHECK_LINEAR,
1831 UPDATECHECK_NO_CONFLICT,
1840 UPDATECHECK_NO_CONFLICT,
1832 ):
1841 ):
1833 raise ValueError(
1842 raise ValueError(
1834 r'Invalid updatecheck %r (can accept %r)'
1843 r'Invalid updatecheck %r (can accept %r)'
1835 % (
1844 % (
1836 updatecheck,
1845 updatecheck,
1837 (
1846 (
1838 UPDATECHECK_NONE,
1847 UPDATECHECK_NONE,
1839 UPDATECHECK_LINEAR,
1848 UPDATECHECK_LINEAR,
1840 UPDATECHECK_NO_CONFLICT,
1849 UPDATECHECK_NO_CONFLICT,
1841 ),
1850 ),
1842 )
1851 )
1843 )
1852 )
1844 if wc is not None and wc.isinmemory():
1853 if wc is not None and wc.isinmemory():
1845 maybe_wlock = util.nullcontextmanager()
1854 maybe_wlock = util.nullcontextmanager()
1846 else:
1855 else:
1847 maybe_wlock = repo.wlock()
1856 maybe_wlock = repo.wlock()
1848 with maybe_wlock:
1857 with maybe_wlock:
1849 if wc is None:
1858 if wc is None:
1850 wc = repo[None]
1859 wc = repo[None]
1851 pl = wc.parents()
1860 pl = wc.parents()
1852 p1 = pl[0]
1861 p1 = pl[0]
1853 p2 = repo[node]
1862 p2 = repo[node]
1854 if ancestor is not None:
1863 if ancestor is not None:
1855 pas = [repo[ancestor]]
1864 pas = [repo[ancestor]]
1856 else:
1865 else:
1857 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1866 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1858 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1867 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1859 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1868 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1860 else:
1869 else:
1861 pas = [p1.ancestor(p2, warn=branchmerge)]
1870 pas = [p1.ancestor(p2, warn=branchmerge)]
1862
1871
1863 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1872 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1864
1873
1865 overwrite = force and not branchmerge
1874 overwrite = force and not branchmerge
1866 ### check phase
1875 ### check phase
1867 if not overwrite:
1876 if not overwrite:
1868 if len(pl) > 1:
1877 if len(pl) > 1:
1869 raise error.Abort(_(b"outstanding uncommitted merge"))
1878 raise error.Abort(_(b"outstanding uncommitted merge"))
1870 ms = mergestatemod.mergestate.read(repo)
1879 ms = mergestatemod.mergestate.read(repo)
1871 if list(ms.unresolved()):
1880 if list(ms.unresolved()):
1872 raise error.Abort(
1881 raise error.Abort(
1873 _(b"outstanding merge conflicts"),
1882 _(b"outstanding merge conflicts"),
1874 hint=_(b"use 'hg resolve' to resolve"),
1883 hint=_(b"use 'hg resolve' to resolve"),
1875 )
1884 )
1876 if branchmerge:
1885 if branchmerge:
1877 if pas == [p2]:
1886 if pas == [p2]:
1878 raise error.Abort(
1887 raise error.Abort(
1879 _(
1888 _(
1880 b"merging with a working directory ancestor"
1889 b"merging with a working directory ancestor"
1881 b" has no effect"
1890 b" has no effect"
1882 )
1891 )
1883 )
1892 )
1884 elif pas == [p1]:
1893 elif pas == [p1]:
1885 if not mergeancestor and wc.branch() == p2.branch():
1894 if not mergeancestor and wc.branch() == p2.branch():
1886 raise error.Abort(
1895 raise error.Abort(
1887 _(b"nothing to merge"),
1896 _(b"nothing to merge"),
1888 hint=_(b"use 'hg update' or check 'hg heads'"),
1897 hint=_(b"use 'hg update' or check 'hg heads'"),
1889 )
1898 )
1890 if not force and (wc.files() or wc.deleted()):
1899 if not force and (wc.files() or wc.deleted()):
1891 raise error.Abort(
1900 raise error.Abort(
1892 _(b"uncommitted changes"),
1901 _(b"uncommitted changes"),
1893 hint=_(b"use 'hg status' to list changes"),
1902 hint=_(b"use 'hg status' to list changes"),
1894 )
1903 )
1895 if not wc.isinmemory():
1904 if not wc.isinmemory():
1896 for s in sorted(wc.substate):
1905 for s in sorted(wc.substate):
1897 wc.sub(s).bailifchanged()
1906 wc.sub(s).bailifchanged()
1898
1907
1899 elif not overwrite:
1908 elif not overwrite:
1900 if p1 == p2: # no-op update
1909 if p1 == p2: # no-op update
1901 # call the hooks and exit early
1910 # call the hooks and exit early
1902 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1911 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1903 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1912 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1904 return updateresult(0, 0, 0, 0)
1913 return updateresult(0, 0, 0, 0)
1905
1914
1906 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1915 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1907 [p1],
1916 [p1],
1908 [p2],
1917 [p2],
1909 ): # nonlinear
1918 ): # nonlinear
1910 dirty = wc.dirty(missing=True)
1919 dirty = wc.dirty(missing=True)
1911 if dirty:
1920 if dirty:
1912 # Branching is a bit strange to ensure we do the minimal
1921 # Branching is a bit strange to ensure we do the minimal
1913 # amount of call to obsutil.foreground.
1922 # amount of call to obsutil.foreground.
1914 foreground = obsutil.foreground(repo, [p1.node()])
1923 foreground = obsutil.foreground(repo, [p1.node()])
1915 # note: the <node> variable contains a random identifier
1924 # note: the <node> variable contains a random identifier
1916 if repo[node].node() in foreground:
1925 if repo[node].node() in foreground:
1917 pass # allow updating to successors
1926 pass # allow updating to successors
1918 else:
1927 else:
1919 msg = _(b"uncommitted changes")
1928 msg = _(b"uncommitted changes")
1920 hint = _(b"commit or update --clean to discard changes")
1929 hint = _(b"commit or update --clean to discard changes")
1921 raise error.UpdateAbort(msg, hint=hint)
1930 raise error.UpdateAbort(msg, hint=hint)
1922 else:
1931 else:
1923 # Allow jumping branches if clean and specific rev given
1932 # Allow jumping branches if clean and specific rev given
1924 pass
1933 pass
1925
1934
1926 if overwrite:
1935 if overwrite:
1927 pas = [wc]
1936 pas = [wc]
1928 elif not branchmerge:
1937 elif not branchmerge:
1929 pas = [p1]
1938 pas = [p1]
1930
1939
1931 # deprecated config: merge.followcopies
1940 # deprecated config: merge.followcopies
1932 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1941 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1933 if overwrite:
1942 if overwrite:
1934 followcopies = False
1943 followcopies = False
1935 elif not pas[0]:
1944 elif not pas[0]:
1936 followcopies = False
1945 followcopies = False
1937 if not branchmerge and not wc.dirty(missing=True):
1946 if not branchmerge and not wc.dirty(missing=True):
1938 followcopies = False
1947 followcopies = False
1939
1948
1940 ### calculate phase
1949 ### calculate phase
1941 mresult = calculateupdates(
1950 mresult = calculateupdates(
1942 repo,
1951 repo,
1943 wc,
1952 wc,
1944 p2,
1953 p2,
1945 pas,
1954 pas,
1946 branchmerge,
1955 branchmerge,
1947 force,
1956 force,
1948 mergeancestor,
1957 mergeancestor,
1949 followcopies,
1958 followcopies,
1950 matcher=matcher,
1959 matcher=matcher,
1951 mergeforce=mergeforce,
1960 mergeforce=mergeforce,
1952 )
1961 )
1953
1962
1954 if updatecheck == UPDATECHECK_NO_CONFLICT:
1963 if updatecheck == UPDATECHECK_NO_CONFLICT:
1955 if mresult.hasconflicts():
1964 if mresult.hasconflicts():
1956 msg = _(b"conflicting changes")
1965 msg = _(b"conflicting changes")
1957 hint = _(b"commit or update --clean to discard changes")
1966 hint = _(b"commit or update --clean to discard changes")
1958 raise error.Abort(msg, hint=hint)
1967 raise error.Abort(msg, hint=hint)
1959
1968
1960 # Prompt and create actions. Most of this is in the resolve phase
1969 # Prompt and create actions. Most of this is in the resolve phase
1961 # already, but we can't handle .hgsubstate in filemerge or
1970 # already, but we can't handle .hgsubstate in filemerge or
1962 # subrepoutil.submerge yet so we have to keep prompting for it.
1971 # subrepoutil.submerge yet so we have to keep prompting for it.
1963 if b'.hgsubstate' in mresult.actions:
1972 vals = mresult.getfile(b'.hgsubstate')
1973 if vals:
1964 f = b'.hgsubstate'
1974 f = b'.hgsubstate'
1965 m, args, msg = mresult.actions[f]
1975 m, args, msg = vals
1966 prompts = filemerge.partextras(labels)
1976 prompts = filemerge.partextras(labels)
1967 prompts[b'f'] = f
1977 prompts[b'f'] = f
1968 if m == mergestatemod.ACTION_CHANGED_DELETED:
1978 if m == mergestatemod.ACTION_CHANGED_DELETED:
1969 if repo.ui.promptchoice(
1979 if repo.ui.promptchoice(
1970 _(
1980 _(
1971 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1981 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1972 b"use (c)hanged version or (d)elete?"
1982 b"use (c)hanged version or (d)elete?"
1973 b"$$ &Changed $$ &Delete"
1983 b"$$ &Changed $$ &Delete"
1974 )
1984 )
1975 % prompts,
1985 % prompts,
1976 0,
1986 0,
1977 ):
1987 ):
1978 mresult.addfile(
1988 mresult.addfile(
1979 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1989 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1980 )
1990 )
1981 elif f in p1:
1991 elif f in p1:
1982 mresult.addfile(
1992 mresult.addfile(
1983 f,
1993 f,
1984 mergestatemod.ACTION_ADD_MODIFIED,
1994 mergestatemod.ACTION_ADD_MODIFIED,
1985 None,
1995 None,
1986 b'prompt keep',
1996 b'prompt keep',
1987 )
1997 )
1988 else:
1998 else:
1989 mresult.addfile(
1999 mresult.addfile(
1990 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
2000 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1991 )
2001 )
1992 elif m == mergestatemod.ACTION_DELETED_CHANGED:
2002 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1993 f1, f2, fa, move, anc = args
2003 f1, f2, fa, move, anc = args
1994 flags = p2[f2].flags()
2004 flags = p2[f2].flags()
1995 if (
2005 if (
1996 repo.ui.promptchoice(
2006 repo.ui.promptchoice(
1997 _(
2007 _(
1998 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2008 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1999 b"use (c)hanged version or leave (d)eleted?"
2009 b"use (c)hanged version or leave (d)eleted?"
2000 b"$$ &Changed $$ &Deleted"
2010 b"$$ &Changed $$ &Deleted"
2001 )
2011 )
2002 % prompts,
2012 % prompts,
2003 0,
2013 0,
2004 )
2014 )
2005 == 0
2015 == 0
2006 ):
2016 ):
2007 mresult.addfile(
2017 mresult.addfile(
2008 f,
2018 f,
2009 mergestatemod.ACTION_GET,
2019 mergestatemod.ACTION_GET,
2010 (flags, False),
2020 (flags, False),
2011 b'prompt recreating',
2021 b'prompt recreating',
2012 )
2022 )
2013 else:
2023 else:
2014 mresult.removefile(f)
2024 mresult.removefile(f)
2015
2025
2016 if not util.fscasesensitive(repo.path):
2026 if not util.fscasesensitive(repo.path):
2017 # check collision between files only in p2 for clean update
2027 # check collision between files only in p2 for clean update
2018 if not branchmerge and (
2028 if not branchmerge and (
2019 force or not wc.dirty(missing=True, branch=False)
2029 force or not wc.dirty(missing=True, branch=False)
2020 ):
2030 ):
2021 _checkcollision(repo, p2.manifest(), None)
2031 _checkcollision(repo, p2.manifest(), None)
2022 else:
2032 else:
2023 _checkcollision(repo, wc.manifest(), mresult)
2033 _checkcollision(repo, wc.manifest(), mresult)
2024
2034
2025 # divergent renames
2035 # divergent renames
2026 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2036 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2027 repo.ui.warn(
2037 repo.ui.warn(
2028 _(
2038 _(
2029 b"note: possible conflict - %s was renamed "
2039 b"note: possible conflict - %s was renamed "
2030 b"multiple times to:\n"
2040 b"multiple times to:\n"
2031 )
2041 )
2032 % f
2042 % f
2033 )
2043 )
2034 for nf in sorted(fl):
2044 for nf in sorted(fl):
2035 repo.ui.warn(b" %s\n" % nf)
2045 repo.ui.warn(b" %s\n" % nf)
2036
2046
2037 # rename and delete
2047 # rename and delete
2038 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2048 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2039 repo.ui.warn(
2049 repo.ui.warn(
2040 _(
2050 _(
2041 b"note: possible conflict - %s was deleted "
2051 b"note: possible conflict - %s was deleted "
2042 b"and renamed to:\n"
2052 b"and renamed to:\n"
2043 )
2053 )
2044 % f
2054 % f
2045 )
2055 )
2046 for nf in sorted(fl):
2056 for nf in sorted(fl):
2047 repo.ui.warn(b" %s\n" % nf)
2057 repo.ui.warn(b" %s\n" % nf)
2048
2058
2049 ### apply phase
2059 ### apply phase
2050 if not branchmerge: # just jump to the new rev
2060 if not branchmerge: # just jump to the new rev
2051 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2061 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2052 # If we're doing a partial update, we need to skip updating
2062 # If we're doing a partial update, we need to skip updating
2053 # the dirstate.
2063 # the dirstate.
2054 always = matcher is None or matcher.always()
2064 always = matcher is None or matcher.always()
2055 updatedirstate = updatedirstate and always and not wc.isinmemory()
2065 updatedirstate = updatedirstate and always and not wc.isinmemory()
2056 if updatedirstate:
2066 if updatedirstate:
2057 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2067 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2058 # note that we're in the middle of an update
2068 # note that we're in the middle of an update
2059 repo.vfs.write(b'updatestate', p2.hex())
2069 repo.vfs.write(b'updatestate', p2.hex())
2060
2070
2061 _advertisefsmonitor(
2071 _advertisefsmonitor(
2062 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2072 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2063 )
2073 )
2064
2074
2065 wantfiledata = updatedirstate and not branchmerge
2075 wantfiledata = updatedirstate and not branchmerge
2066 stats, getfiledata = applyupdates(
2076 stats, getfiledata = applyupdates(
2067 repo,
2077 repo,
2068 mresult,
2078 mresult,
2069 wc,
2079 wc,
2070 p2,
2080 p2,
2071 overwrite,
2081 overwrite,
2072 wantfiledata,
2082 wantfiledata,
2073 labels=labels,
2083 labels=labels,
2074 commitinfo=mresult.commitinfo,
2084 commitinfo=mresult.commitinfo,
2075 )
2085 )
2076
2086
2077 if updatedirstate:
2087 if updatedirstate:
2078 with repo.dirstate.parentchange():
2088 with repo.dirstate.parentchange():
2079 repo.setparents(fp1, fp2)
2089 repo.setparents(fp1, fp2)
2080 mergestatemod.recordupdates(
2090 mergestatemod.recordupdates(
2081 repo, mresult.actionsdict, branchmerge, getfiledata
2091 repo, mresult.actionsdict, branchmerge, getfiledata
2082 )
2092 )
2083 # update completed, clear state
2093 # update completed, clear state
2084 util.unlink(repo.vfs.join(b'updatestate'))
2094 util.unlink(repo.vfs.join(b'updatestate'))
2085
2095
2086 if not branchmerge:
2096 if not branchmerge:
2087 repo.dirstate.setbranch(p2.branch())
2097 repo.dirstate.setbranch(p2.branch())
2088
2098
2089 # If we're updating to a location, clean up any stale temporary includes
2099 # If we're updating to a location, clean up any stale temporary includes
2090 # (ex: this happens during hg rebase --abort).
2100 # (ex: this happens during hg rebase --abort).
2091 if not branchmerge:
2101 if not branchmerge:
2092 sparse.prunetemporaryincludes(repo)
2102 sparse.prunetemporaryincludes(repo)
2093
2103
2094 if updatedirstate:
2104 if updatedirstate:
2095 repo.hook(
2105 repo.hook(
2096 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2106 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2097 )
2107 )
2098 return stats
2108 return stats
2099
2109
2100
2110
2101 def merge(ctx, labels=None, force=False, wc=None):
2111 def merge(ctx, labels=None, force=False, wc=None):
2102 """Merge another topological branch into the working copy.
2112 """Merge another topological branch into the working copy.
2103
2113
2104 force = whether the merge was run with 'merge --force' (deprecated)
2114 force = whether the merge was run with 'merge --force' (deprecated)
2105 """
2115 """
2106
2116
2107 return update(
2117 return update(
2108 ctx.repo(),
2118 ctx.repo(),
2109 ctx.rev(),
2119 ctx.rev(),
2110 labels=labels,
2120 labels=labels,
2111 branchmerge=True,
2121 branchmerge=True,
2112 force=force,
2122 force=force,
2113 mergeforce=force,
2123 mergeforce=force,
2114 wc=wc,
2124 wc=wc,
2115 )
2125 )
2116
2126
2117
2127
2118 def clean_update(ctx, wc=None):
2128 def clean_update(ctx, wc=None):
2119 """Do a clean update to the given commit.
2129 """Do a clean update to the given commit.
2120
2130
2121 This involves updating to the commit and discarding any changes in the
2131 This involves updating to the commit and discarding any changes in the
2122 working copy.
2132 working copy.
2123 """
2133 """
2124 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2134 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2125
2135
2126
2136
2127 def revert_to(ctx, matcher=None, wc=None):
2137 def revert_to(ctx, matcher=None, wc=None):
2128 """Revert the working copy to the given commit.
2138 """Revert the working copy to the given commit.
2129
2139
2130 The working copy will keep its current parent(s) but its content will
2140 The working copy will keep its current parent(s) but its content will
2131 be the same as in the given commit.
2141 be the same as in the given commit.
2132 """
2142 """
2133
2143
2134 return update(
2144 return update(
2135 ctx.repo(),
2145 ctx.repo(),
2136 ctx.rev(),
2146 ctx.rev(),
2137 branchmerge=False,
2147 branchmerge=False,
2138 force=True,
2148 force=True,
2139 updatedirstate=False,
2149 updatedirstate=False,
2140 matcher=matcher,
2150 matcher=matcher,
2141 wc=wc,
2151 wc=wc,
2142 )
2152 )
2143
2153
2144
2154
2145 def graft(
2155 def graft(
2146 repo,
2156 repo,
2147 ctx,
2157 ctx,
2148 base=None,
2158 base=None,
2149 labels=None,
2159 labels=None,
2150 keepparent=False,
2160 keepparent=False,
2151 keepconflictparent=False,
2161 keepconflictparent=False,
2152 wctx=None,
2162 wctx=None,
2153 ):
2163 ):
2154 """Do a graft-like merge.
2164 """Do a graft-like merge.
2155
2165
2156 This is a merge where the merge ancestor is chosen such that one
2166 This is a merge where the merge ancestor is chosen such that one
2157 or more changesets are grafted onto the current changeset. In
2167 or more changesets are grafted onto the current changeset. In
2158 addition to the merge, this fixes up the dirstate to include only
2168 addition to the merge, this fixes up the dirstate to include only
2159 a single parent (if keepparent is False) and tries to duplicate any
2169 a single parent (if keepparent is False) and tries to duplicate any
2160 renames/copies appropriately.
2170 renames/copies appropriately.
2161
2171
2162 ctx - changeset to rebase
2172 ctx - changeset to rebase
2163 base - merge base, or ctx.p1() if not specified
2173 base - merge base, or ctx.p1() if not specified
2164 labels - merge labels eg ['local', 'graft']
2174 labels - merge labels eg ['local', 'graft']
2165 keepparent - keep second parent if any
2175 keepparent - keep second parent if any
2166 keepconflictparent - if unresolved, keep parent used for the merge
2176 keepconflictparent - if unresolved, keep parent used for the merge
2167
2177
2168 """
2178 """
2169 # If we're grafting a descendant onto an ancestor, be sure to pass
2179 # If we're grafting a descendant onto an ancestor, be sure to pass
2170 # mergeancestor=True to update. This does two things: 1) allows the merge if
2180 # mergeancestor=True to update. This does two things: 1) allows the merge if
2171 # the destination is the same as the parent of the ctx (so we can use graft
2181 # the destination is the same as the parent of the ctx (so we can use graft
2172 # to copy commits), and 2) informs update that the incoming changes are
2182 # to copy commits), and 2) informs update that the incoming changes are
2173 # newer than the destination so it doesn't prompt about "remote changed foo
2183 # newer than the destination so it doesn't prompt about "remote changed foo
2174 # which local deleted".
2184 # which local deleted".
2175 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2185 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2176 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2186 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2177 wctx = wctx or repo[None]
2187 wctx = wctx or repo[None]
2178 pctx = wctx.p1()
2188 pctx = wctx.p1()
2179 base = base or ctx.p1()
2189 base = base or ctx.p1()
2180 mergeancestor = (
2190 mergeancestor = (
2181 repo.changelog.isancestor(pctx.node(), ctx.node())
2191 repo.changelog.isancestor(pctx.node(), ctx.node())
2182 or pctx.rev() == base.rev()
2192 or pctx.rev() == base.rev()
2183 )
2193 )
2184
2194
2185 stats = update(
2195 stats = update(
2186 repo,
2196 repo,
2187 ctx.node(),
2197 ctx.node(),
2188 True,
2198 True,
2189 True,
2199 True,
2190 base.node(),
2200 base.node(),
2191 mergeancestor=mergeancestor,
2201 mergeancestor=mergeancestor,
2192 labels=labels,
2202 labels=labels,
2193 wc=wctx,
2203 wc=wctx,
2194 )
2204 )
2195
2205
2196 if keepconflictparent and stats.unresolvedcount:
2206 if keepconflictparent and stats.unresolvedcount:
2197 pother = ctx.node()
2207 pother = ctx.node()
2198 else:
2208 else:
2199 pother = nullid
2209 pother = nullid
2200 parents = ctx.parents()
2210 parents = ctx.parents()
2201 if keepparent and len(parents) == 2 and base in parents:
2211 if keepparent and len(parents) == 2 and base in parents:
2202 parents.remove(base)
2212 parents.remove(base)
2203 pother = parents[0].node()
2213 pother = parents[0].node()
2204 # Never set both parents equal to each other
2214 # Never set both parents equal to each other
2205 if pother == pctx.node():
2215 if pother == pctx.node():
2206 pother = nullid
2216 pother = nullid
2207
2217
2208 if wctx.isinmemory():
2218 if wctx.isinmemory():
2209 wctx.setparents(pctx.node(), pother)
2219 wctx.setparents(pctx.node(), pother)
2210 # fix up dirstate for copies and renames
2220 # fix up dirstate for copies and renames
2211 copies.graftcopies(wctx, ctx, base)
2221 copies.graftcopies(wctx, ctx, base)
2212 else:
2222 else:
2213 with repo.dirstate.parentchange():
2223 with repo.dirstate.parentchange():
2214 repo.setparents(pctx.node(), pother)
2224 repo.setparents(pctx.node(), pother)
2215 repo.dirstate.write(repo.currenttransaction())
2225 repo.dirstate.write(repo.currenttransaction())
2216 # fix up dirstate for copies and renames
2226 # fix up dirstate for copies and renames
2217 copies.graftcopies(wctx, ctx, base)
2227 copies.graftcopies(wctx, ctx, base)
2218 return stats
2228 return stats
2219
2229
2220
2230
2221 def purge(
2231 def purge(
2222 repo,
2232 repo,
2223 matcher,
2233 matcher,
2224 unknown=True,
2234 unknown=True,
2225 ignored=False,
2235 ignored=False,
2226 removeemptydirs=True,
2236 removeemptydirs=True,
2227 removefiles=True,
2237 removefiles=True,
2228 abortonerror=False,
2238 abortonerror=False,
2229 noop=False,
2239 noop=False,
2230 ):
2240 ):
2231 """Purge the working directory of untracked files.
2241 """Purge the working directory of untracked files.
2232
2242
2233 ``matcher`` is a matcher configured to scan the working directory -
2243 ``matcher`` is a matcher configured to scan the working directory -
2234 potentially a subset.
2244 potentially a subset.
2235
2245
2236 ``unknown`` controls whether unknown files should be purged.
2246 ``unknown`` controls whether unknown files should be purged.
2237
2247
2238 ``ignored`` controls whether ignored files should be purged.
2248 ``ignored`` controls whether ignored files should be purged.
2239
2249
2240 ``removeemptydirs`` controls whether empty directories should be removed.
2250 ``removeemptydirs`` controls whether empty directories should be removed.
2241
2251
2242 ``removefiles`` controls whether files are removed.
2252 ``removefiles`` controls whether files are removed.
2243
2253
2244 ``abortonerror`` causes an exception to be raised if an error occurs
2254 ``abortonerror`` causes an exception to be raised if an error occurs
2245 deleting a file or directory.
2255 deleting a file or directory.
2246
2256
2247 ``noop`` controls whether to actually remove files. If not defined, actions
2257 ``noop`` controls whether to actually remove files. If not defined, actions
2248 will be taken.
2258 will be taken.
2249
2259
2250 Returns an iterable of relative paths in the working directory that were
2260 Returns an iterable of relative paths in the working directory that were
2251 or would be removed.
2261 or would be removed.
2252 """
2262 """
2253
2263
2254 def remove(removefn, path):
2264 def remove(removefn, path):
2255 try:
2265 try:
2256 removefn(path)
2266 removefn(path)
2257 except OSError:
2267 except OSError:
2258 m = _(b'%s cannot be removed') % path
2268 m = _(b'%s cannot be removed') % path
2259 if abortonerror:
2269 if abortonerror:
2260 raise error.Abort(m)
2270 raise error.Abort(m)
2261 else:
2271 else:
2262 repo.ui.warn(_(b'warning: %s\n') % m)
2272 repo.ui.warn(_(b'warning: %s\n') % m)
2263
2273
2264 # There's no API to copy a matcher. So mutate the passed matcher and
2274 # There's no API to copy a matcher. So mutate the passed matcher and
2265 # restore it when we're done.
2275 # restore it when we're done.
2266 oldtraversedir = matcher.traversedir
2276 oldtraversedir = matcher.traversedir
2267
2277
2268 res = []
2278 res = []
2269
2279
2270 try:
2280 try:
2271 if removeemptydirs:
2281 if removeemptydirs:
2272 directories = []
2282 directories = []
2273 matcher.traversedir = directories.append
2283 matcher.traversedir = directories.append
2274
2284
2275 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2285 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2276
2286
2277 if removefiles:
2287 if removefiles:
2278 for f in sorted(status.unknown + status.ignored):
2288 for f in sorted(status.unknown + status.ignored):
2279 if not noop:
2289 if not noop:
2280 repo.ui.note(_(b'removing file %s\n') % f)
2290 repo.ui.note(_(b'removing file %s\n') % f)
2281 remove(repo.wvfs.unlink, f)
2291 remove(repo.wvfs.unlink, f)
2282 res.append(f)
2292 res.append(f)
2283
2293
2284 if removeemptydirs:
2294 if removeemptydirs:
2285 for f in sorted(directories, reverse=True):
2295 for f in sorted(directories, reverse=True):
2286 if matcher(f) and not repo.wvfs.listdir(f):
2296 if matcher(f) and not repo.wvfs.listdir(f):
2287 if not noop:
2297 if not noop:
2288 repo.ui.note(_(b'removing directory %s\n') % f)
2298 repo.ui.note(_(b'removing directory %s\n') % f)
2289 remove(repo.wvfs.rmdir, f)
2299 remove(repo.wvfs.rmdir, f)
2290 res.append(f)
2300 res.append(f)
2291
2301
2292 return res
2302 return res
2293
2303
2294 finally:
2304 finally:
2295 matcher.traversedir = oldtraversedir
2305 matcher.traversedir = oldtraversedir
General Comments 0
You need to be logged in to leave comments. Login now